svn commit: r40510 - in /dev/incubator/mxnet/1.7.0.rc1: ./ apache-mxnet-src-1.7.0.rc1-incubating.tar.gz apache-mxnet-src-1.7.0.rc1-incubating.tar.gz.asc apache-mxnet-src-1.7.0.rc1-incubating.tar.gz.sha512

2020-07-16 Thread taolv
Author: taolv
Date: Thu Jul 16 08:15:47 2020
New Revision: 40510

Log:
Add mxnet-1.7.0.rc1

Added:
dev/incubator/mxnet/1.7.0.rc1/
dev/incubator/mxnet/1.7.0.rc1/apache-mxnet-src-1.7.0.rc1-incubating.tar.gz  
 (with props)

dev/incubator/mxnet/1.7.0.rc1/apache-mxnet-src-1.7.0.rc1-incubating.tar.gz.asc  
 (with props)

dev/incubator/mxnet/1.7.0.rc1/apache-mxnet-src-1.7.0.rc1-incubating.tar.gz.sha512

Added: 
dev/incubator/mxnet/1.7.0.rc1/apache-mxnet-src-1.7.0.rc1-incubating.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/incubator/mxnet/1.7.0.rc1/apache-mxnet-src-1.7.0.rc1-incubating.tar.gz
--
svn:mime-type = application/x-gzip

Added: 
dev/incubator/mxnet/1.7.0.rc1/apache-mxnet-src-1.7.0.rc1-incubating.tar.gz.asc
==
Binary file - no diff available.

Propchange: 
dev/incubator/mxnet/1.7.0.rc1/apache-mxnet-src-1.7.0.rc1-incubating.tar.gz.asc
--
svn:mime-type = application/pgp-signature

Added: 
dev/incubator/mxnet/1.7.0.rc1/apache-mxnet-src-1.7.0.rc1-incubating.tar.gz.sha512
==
--- 
dev/incubator/mxnet/1.7.0.rc1/apache-mxnet-src-1.7.0.rc1-incubating.tar.gz.sha512
 (added)
+++ 
dev/incubator/mxnet/1.7.0.rc1/apache-mxnet-src-1.7.0.rc1-incubating.tar.gz.sha512
 Thu Jul 16 08:15:47 2020
@@ -0,0 +1 @@
+ab996843374e7ccdf26ee875eacf639c1f30895df8b5d2c1830cbbfee6d097af0ee05279165845a2e15009f23d8401734129b4a46ef12acee39ea738e3c08032
  apache-mxnet-src-1.7.0.rc1-incubating.tar.gz




[incubator-mxnet] tag 1.7.0.rc1 created (now 64f737c)

2020-07-16 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to tag 1.7.0.rc1
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


  at 64f737c  (commit)
No new revisions were added by this update.



[incubator-mxnet] tag 1.7.0.rc1 created (now 64f737c)

2020-07-16 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to tag 1.7.0.rc1
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


  at 64f737c  (commit)
No new revisions were added by this update.



[incubator-mxnet] branch v1.7.x updated: [v1.6] Fix the monitor_callback invalid issue during calibration with variable input shapes (#18632) (#18703)

2020-07-15 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 64f737c  [v1.6] Fix the monitor_callback invalid issue during 
calibration with variable input shapes (#18632) (#18703)
64f737c is described below

commit 64f737cdd59fe88d2c5b479f25d011c5156b6a8a
Author: ciyong 
AuthorDate: Wed Jul 15 15:03:28 2020 +0800

[v1.6] Fix the monitor_callback invalid issue during calibration with 
variable input shapes (#18632) (#18703)

* Fix the monitor_callback invalid issue during calibration with variable 
input shapes

* retrigger CI

* Add UT for monitor check and disable codecov

Co-authored-by: Tao Lv 
---
 .codecov.yml   |  3 ++
 python/mxnet/executor.py   |  9 ++
 tests/python/unittest/test_operator.py | 53 ++
 3 files changed, 65 insertions(+)

diff --git a/.codecov.yml b/.codecov.yml
index 97624c2..70037e6 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -4,6 +4,9 @@ codecov:
 require_ci_to_pass: yes
 
 coverage:
+  status:
+project: off
+patch: off
   precision: 2
   round: down
   range: "70...100"
diff --git a/python/mxnet/executor.py b/python/mxnet/executor.py
index 03fa812..d78d7e5 100644
--- a/python/mxnet/executor.py
+++ b/python/mxnet/executor.py
@@ -79,6 +79,7 @@ class Executor(object):
 self._aux_dict = None
 self._output_dict = None
 self._monitor_callback = None
+self._monitor_all = None
 self._ctx = copy.deepcopy(ctx)
 self._grad_req = copy.deepcopy(grad_req)
 self._group2ctx = copy.deepcopy(group2ctx)
@@ -253,6 +254,7 @@ class Executor(object):
 """
 cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, 
ctypes.c_void_p)
 self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
+self._monitor_all = monitor_all
 check_call(_LIB.MXExecutorSetMonitorCallbackEX(
 self.handle,
 self._monitor_callback,
@@ -477,6 +479,13 @@ class Executor(object):
 executor.arg_arrays = arg_arrays
 executor.grad_arrays = grad_arrays
 executor.aux_arrays = aux_arrays
+if (self._monitor_callback is not None) and (self._monitor_all is not 
None):
+# rebind callback to the new executor if the callback is valid
+check_call(_LIB.MXExecutorSetMonitorCallbackEX(
+handle,
+self._monitor_callback,
+None,
+ctypes.c_int(self._monitor_all)))
 return executor
 
 def debug_str(self):
diff --git a/tests/python/unittest/test_operator.py 
b/tests/python/unittest/test_operator.py
index a2739a3..b9e2422 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -8365,6 +8365,59 @@ def test_op_all_names_monitor():
 del os.environ['MXNET_SUBGRAPH_BACKEND']
 
 @with_seed()
+def test_monitor_with_variable_input_shape():
+output = {}
+
+def get_output_min_callback(name, arr):
+name = py_str(name)
+handle = ctypes.cast(arr, NDArrayHandle)
+arr = NDArray(handle, writable=False)
+min_val = mx.ndarray.min(arr).asscalar()
+if name in output:
+output[name] = min(output[name], min_val)
+else:
+output[name] = min_val
+
+def check_result(output, names):
+assert len(output) > 0
+for k, v in output.items():
+assert k in names
+assert v is not None
+
+is_windows = sys.platform.startswith('win')
+if (is_windows):
+# Windows doesn't support set environment variable on the fly, so 
disable it for now
+pass
+else:
+# Disable subgraph in case subgraph will replace symbol
+os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
+
+batch_size = 1
+op_name = 'conv'
+dshape = (batch_size, 3, 10, 10)
+data = mx.sym.Variable('data', shape=dshape)
+sym = mx.sym.Convolution(data, kernel=(1, 1), num_filter=1, 
name=op_name)
+
+mod = mx.module.Module(symbol=sym, label_names=None)
+mod.bind(for_training=False, data_shapes=[('data', dshape)])
+mod.init_params()
+mod._exec_group.execs[0].set_monitor_callback(get_output_min_callback, 
monitor_all=True)
+
+new_dshape = dshape[:-1] + (dshape[-1] + 4,)
+new_data = mx.nd.random.uniform(shape=new_dshape)
+new_data = mx.io.NDArrayIter(data=new_data, batch_size=batch_size)
+new_data = DummyIter(new_data)
+
+for batch in new_data:
+mod.forward(data_batch=batch, is_train=False)
+mx.nd.waitall()
+break
+
+name_list = ['data', 'conv_data', 'co

[incubator-mxnet] branch v1.7.x updated: [v1.6] Fix the monitor_callback invalid issue during calibration with variable input shapes (#18632) (#18703)

2020-07-15 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 64f737c  [v1.6] Fix the monitor_callback invalid issue during 
calibration with variable input shapes (#18632) (#18703)
64f737c is described below

commit 64f737cdd59fe88d2c5b479f25d011c5156b6a8a
Author: ciyong 
AuthorDate: Wed Jul 15 15:03:28 2020 +0800

[v1.6] Fix the monitor_callback invalid issue during calibration with 
variable input shapes (#18632) (#18703)

* Fix the monitor_callback invalid issue during calibration with variable 
input shapes

* retrigger CI

* Add UT for monitor check and disable codecov

Co-authored-by: Tao Lv 
---
 .codecov.yml   |  3 ++
 python/mxnet/executor.py   |  9 ++
 tests/python/unittest/test_operator.py | 53 ++
 3 files changed, 65 insertions(+)

diff --git a/.codecov.yml b/.codecov.yml
index 97624c2..70037e6 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -4,6 +4,9 @@ codecov:
 require_ci_to_pass: yes
 
 coverage:
+  status:
+project: off
+patch: off
   precision: 2
   round: down
   range: "70...100"
diff --git a/python/mxnet/executor.py b/python/mxnet/executor.py
index 03fa812..d78d7e5 100644
--- a/python/mxnet/executor.py
+++ b/python/mxnet/executor.py
@@ -79,6 +79,7 @@ class Executor(object):
 self._aux_dict = None
 self._output_dict = None
 self._monitor_callback = None
+self._monitor_all = None
 self._ctx = copy.deepcopy(ctx)
 self._grad_req = copy.deepcopy(grad_req)
 self._group2ctx = copy.deepcopy(group2ctx)
@@ -253,6 +254,7 @@ class Executor(object):
 """
 cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, 
ctypes.c_void_p)
 self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
+self._monitor_all = monitor_all
 check_call(_LIB.MXExecutorSetMonitorCallbackEX(
 self.handle,
 self._monitor_callback,
@@ -477,6 +479,13 @@ class Executor(object):
 executor.arg_arrays = arg_arrays
 executor.grad_arrays = grad_arrays
 executor.aux_arrays = aux_arrays
+if (self._monitor_callback is not None) and (self._monitor_all is not 
None):
+# rebind callback to the new executor if the callback is valid
+check_call(_LIB.MXExecutorSetMonitorCallbackEX(
+handle,
+self._monitor_callback,
+None,
+ctypes.c_int(self._monitor_all)))
 return executor
 
 def debug_str(self):
diff --git a/tests/python/unittest/test_operator.py 
b/tests/python/unittest/test_operator.py
index a2739a3..b9e2422 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -8365,6 +8365,59 @@ def test_op_all_names_monitor():
 del os.environ['MXNET_SUBGRAPH_BACKEND']
 
 @with_seed()
+def test_monitor_with_variable_input_shape():
+output = {}
+
+def get_output_min_callback(name, arr):
+name = py_str(name)
+handle = ctypes.cast(arr, NDArrayHandle)
+arr = NDArray(handle, writable=False)
+min_val = mx.ndarray.min(arr).asscalar()
+if name in output:
+output[name] = min(output[name], min_val)
+else:
+output[name] = min_val
+
+def check_result(output, names):
+assert len(output) > 0
+for k, v in output.items():
+assert k in names
+assert v is not None
+
+is_windows = sys.platform.startswith('win')
+if (is_windows):
+# Windows doesn't support set environment variable on the fly, so 
disable it for now
+pass
+else:
+# Disable subgraph in case subgraph will replace symbol
+os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
+
+batch_size = 1
+op_name = 'conv'
+dshape = (batch_size, 3, 10, 10)
+data = mx.sym.Variable('data', shape=dshape)
+sym = mx.sym.Convolution(data, kernel=(1, 1), num_filter=1, 
name=op_name)
+
+mod = mx.module.Module(symbol=sym, label_names=None)
+mod.bind(for_training=False, data_shapes=[('data', dshape)])
+mod.init_params()
+mod._exec_group.execs[0].set_monitor_callback(get_output_min_callback, 
monitor_all=True)
+
+new_dshape = dshape[:-1] + (dshape[-1] + 4,)
+new_data = mx.nd.random.uniform(shape=new_dshape)
+new_data = mx.io.NDArrayIter(data=new_data, batch_size=batch_size)
+new_data = DummyIter(new_data)
+
+for batch in new_data:
+mod.forward(data_batch=batch, is_train=False)
+mx.nd.waitall()
+break
+
+name_list = ['data', 'conv_data', 'co

[incubator-mxnet] branch v1.7.x updated (c4c7b11 -> 61597a5)

2020-07-14 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from c4c7b11  Revert "Fix memory leaks in Gluon (#18328) (#18358)" (#18692)
 add 61597a5  revise activations (#18700)

No new revisions were added by this update.

Summary of changes:
 python/mxnet/gluon/nn/activations.py  | 18 +++
 tests/python/unittest/test_numpy_gluon.py | 50 +++
 2 files changed, 63 insertions(+), 5 deletions(-)



[incubator-mxnet] branch v1.7.x updated (c4c7b11 -> 61597a5)

2020-07-14 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from c4c7b11  Revert "Fix memory leaks in Gluon (#18328) (#18358)" (#18692)
 add 61597a5  revise activations (#18700)

No new revisions were added by this update.

Summary of changes:
 python/mxnet/gluon/nn/activations.py  | 18 +++
 tests/python/unittest/test_numpy_gluon.py | 50 +++
 2 files changed, 63 insertions(+), 5 deletions(-)



[incubator-mxnet] branch v1.7.x updated (c4c7b11 -> 61597a5)

2020-07-14 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from c4c7b11  Revert "Fix memory leaks in Gluon (#18328) (#18358)" (#18692)
 add 61597a5  revise activations (#18700)

No new revisions were added by this update.

Summary of changes:
 python/mxnet/gluon/nn/activations.py  | 18 +++
 tests/python/unittest/test_numpy_gluon.py | 50 +++
 2 files changed, 63 insertions(+), 5 deletions(-)



[incubator-mxnet] branch v1.7.x updated (c4c7b11 -> 61597a5)

2020-07-14 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from c4c7b11  Revert "Fix memory leaks in Gluon (#18328) (#18358)" (#18692)
 add 61597a5  revise activations (#18700)

No new revisions were added by this update.

Summary of changes:
 python/mxnet/gluon/nn/activations.py  | 18 +++
 tests/python/unittest/test_numpy_gluon.py | 50 +++
 2 files changed, 63 insertions(+), 5 deletions(-)



[incubator-mxnet] branch v1.7.x updated: revise activations (#18700)

2020-07-14 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 61597a5  revise activations (#18700)
61597a5 is described below

commit 61597a570bbd1314808969203398232fcd284b3c
Author: Xingjian Shi 
AuthorDate: Mon Jul 13 23:50:21 2020 -0700

revise activations (#18700)
---
 python/mxnet/gluon/nn/activations.py  | 18 +++
 tests/python/unittest/test_numpy_gluon.py | 50 +++
 2 files changed, 63 insertions(+), 5 deletions(-)

diff --git a/python/mxnet/gluon/nn/activations.py 
b/python/mxnet/gluon/nn/activations.py
index 1b9ce91..385 100644
--- a/python/mxnet/gluon/nn/activations.py
+++ b/python/mxnet/gluon/nn/activations.py
@@ -139,7 +139,8 @@ class PReLU(HybridBlock):
  init=alpha_initializer)
 
 def hybrid_forward(self, F, x, alpha):
-return F.LeakyReLU(x, gamma=alpha, act_type='prelu', name='fwd')
+leaky_relu = F.npx.leaky_relu if is_np_array() else F.LeakyReLU
+return leaky_relu(x, gamma=alpha, act_type='prelu', name='fwd')
 
 
 class ELU(HybridBlock):
@@ -167,7 +168,8 @@ class ELU(HybridBlock):
 self._alpha = alpha
 
 def hybrid_forward(self, F, x):
-return F.LeakyReLU(x, act_type='elu', slope=self._alpha)
+leaky_relu = F.npx.leaky_relu if is_np_array() else F.LeakyReLU
+return leaky_relu(x, act_type='elu', slope=self._alpha)
 
 
 class SELU(HybridBlock):
@@ -187,7 +189,9 @@ class SELU(HybridBlock):
 super(SELU, self).__init__(**kwargs)
 
 def hybrid_forward(self, F, x):
-return F.LeakyReLU(x, act_type='selu', name='fwd')
+leaky_relu = F.npx.leaky_relu if is_np_array() else F.LeakyReLU
+return leaky_relu(x, act_type='selu', name='fwd')
+
 
 class GELU(HybridBlock):
 r"""
@@ -206,7 +210,8 @@ class GELU(HybridBlock):
 super(GELU, self).__init__(**kwargs)
 
 def hybrid_forward(self, F, x):
-return F.LeakyReLU(x, act_type='gelu', name='fwd')
+leaky_relu = F.npx.leaky_relu if is_np_array() else F.LeakyReLU
+return leaky_relu(x, act_type='gelu', name='fwd')
 
 
 class Swish(HybridBlock):
@@ -232,4 +237,7 @@ class Swish(HybridBlock):
 self._beta = beta
 
 def hybrid_forward(self, F, x):
-return x * F.sigmoid(self._beta * x, name='fwd')
+if is_np_array():
+return x * F.npx.sigmoid(self._beta * x)
+else:
+return x * F.sigmoid(self._beta * x, name='fwd')
diff --git a/tests/python/unittest/test_numpy_gluon.py 
b/tests/python/unittest/test_numpy_gluon.py
index 0d1e5fe..2f2e2e0 100644
--- a/tests/python/unittest/test_numpy_gluon.py
+++ b/tests/python/unittest/test_numpy_gluon.py
@@ -25,6 +25,7 @@ import numpy as _np
 import mxnet as mx
 from mxnet import gluon, autograd, np
 from mxnet.test_utils import use_np, assert_almost_equal, 
check_gluon_hybridize_consistency
+from mxnet.gluon import nn
 from common import with_seed
 import random
 
@@ -422,6 +423,55 @@ def test_hybridize_boolean_dtype():
 assert mx.test_utils.same(out1.asnumpy(), out2.asnumpy())
 
 
+@with_seed()
+@use_np
+def test_activations_leakyrelu():
+# Currently, all the activation tests, we will just test for runnable.
+act_layer = nn.LeakyReLU(0.1)
+out = act_layer(mx.np.random.uniform(size=(10,)))
+out.asnumpy()
+
+
+@with_seed()
+@use_np
+def test_activations_prelu():
+act_layer = nn.PReLU()
+act_layer.initialize()
+out = act_layer(mx.np.random.uniform(size=(10,)))
+out.asnumpy()
+
+
+@with_seed()
+@use_np
+def test_activations_elu():
+act_layer = nn.ELU(1.0)
+out = act_layer(mx.np.random.uniform(size=(10,)))
+out.asnumpy()
+
+
+@with_seed()
+@use_np
+def test_activations_selu():
+act_layer = nn.SELU()
+out = act_layer(mx.np.random.uniform(size=(10,)))
+out.asnumpy()
+
+
+@with_seed()
+@use_np
+def test_activations_gelu():
+act_layer = nn.GELU()
+out = act_layer(mx.np.random.uniform(size=(10,)))
+out.asnumpy()
+
+
+@with_seed()
+@use_np
+def test_activations_swish():
+act_layer = nn.Swish()
+out = act_layer(mx.np.random.uniform(size=(10,)))
+out.asnumpy()
+
 if __name__ == '__main__':
 import nose
 nose.runmodule()



[incubator-mxnet] branch v1.x updated: [v1.x] Backport of Fix BatchNorm backward synchronization (#18644) (#18654)

2020-07-08 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
 new 024daa6  [v1.x] Backport of Fix BatchNorm backward synchronization 
(#18644) (#18654)
024daa6 is described below

commit 024daa6b56fab4b96f135fd0c5c9489505ba307a
Author: Andrzej Kotłowski 
AuthorDate: Thu Jul 9 06:39:38 2020 +0200

[v1.x] Backport of Fix BatchNorm backward synchronization (#18644) (#18654)

* Add test for BatchNorm running variables synchronization

* Fix BatchNorm backward synchronization

It fixes issue #18610
---
 src/operator/nn/batch_norm.cc   |  3 +++
 tests/python/unittest/test_gluon.py | 26 ++
 2 files changed, 29 insertions(+)

diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index af8f25a..3e36559 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -640,6 +640,9 @@ then set ``gamma`` to 1 and its gradient to 0.
 NNVM_REGISTER_OP(_backward_BatchNorm)
 .set_num_inputs(8)
 .set_num_outputs(3)
+.set_attr("FMutateInputs", [](const nnvm::NodeAttrs& 
attrs) {
+  return std::vector{6, 7};   // moving_mean, moving_var
+})
 .set_attr("TIsBackward", true)
 .set_attr("FInferStorageType", BatchNormStorageType)
 #if MXNET_USE_MKLDNN == 1
diff --git a/tests/python/unittest/test_gluon.py 
b/tests/python/unittest/test_gluon.py
index cf6bc36..60fd526 100644
--- a/tests/python/unittest/test_gluon.py
+++ b/tests/python/unittest/test_gluon.py
@@ -759,6 +759,32 @@ def test_pool():
 
 
 @with_seed()
+def test_batchnorm_backward_synchronization():
+"""
+Tests if synchronization of BatchNorm running variables is done correctly.
+If not, the test sometimes fails - depending on the timing.
+"""
+ctx = mx.test_utils.default_context()
+
+for variable in ['running_var', 'running_mean']:
+for _ in range(20):
+layer = nn.BatchNorm()
+layer.initialize(ctx=ctx)
+for _ in range(3):
+data = mx.nd.random.normal(loc=10, scale=2, shape=(1, 3, 10, 
10), ctx=ctx)
+with mx.autograd.record():
+out = layer(data)
+out.backward()
+
+# check if each read give the same value
+var1 = getattr(layer, variable).data().asnumpy()
+for _ in range(10):
+var2 = getattr(layer, variable).data().asnumpy()
+if (var1 != var2).any():
+raise AssertionError("Two consecutive reads of " + 
variable + " give different results")
+
+
+@with_seed()
 def test_batchnorm():
 layer = nn.BatchNorm(in_channels=10)
 check_layer_forward(layer, (2, 10, 10, 10))



[incubator-mxnet] branch v1.x updated: [v1.x] Backport of Fix BatchNorm backward synchronization (#18644) (#18654)

2020-07-08 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
 new 024daa6  [v1.x] Backport of Fix BatchNorm backward synchronization 
(#18644) (#18654)
024daa6 is described below

commit 024daa6b56fab4b96f135fd0c5c9489505ba307a
Author: Andrzej Kotłowski 
AuthorDate: Thu Jul 9 06:39:38 2020 +0200

[v1.x] Backport of Fix BatchNorm backward synchronization (#18644) (#18654)

* Add test for BatchNorm running variables synchronization

* Fix BatchNorm backward synchronization

It fixes issue #18610
---
 src/operator/nn/batch_norm.cc   |  3 +++
 tests/python/unittest/test_gluon.py | 26 ++
 2 files changed, 29 insertions(+)

diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index af8f25a..3e36559 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -640,6 +640,9 @@ then set ``gamma`` to 1 and its gradient to 0.
 NNVM_REGISTER_OP(_backward_BatchNorm)
 .set_num_inputs(8)
 .set_num_outputs(3)
+.set_attr("FMutateInputs", [](const nnvm::NodeAttrs& 
attrs) {
+  return std::vector{6, 7};   // moving_mean, moving_var
+})
 .set_attr("TIsBackward", true)
 .set_attr("FInferStorageType", BatchNormStorageType)
 #if MXNET_USE_MKLDNN == 1
diff --git a/tests/python/unittest/test_gluon.py 
b/tests/python/unittest/test_gluon.py
index cf6bc36..60fd526 100644
--- a/tests/python/unittest/test_gluon.py
+++ b/tests/python/unittest/test_gluon.py
@@ -759,6 +759,32 @@ def test_pool():
 
 
 @with_seed()
+def test_batchnorm_backward_synchronization():
+"""
+Tests if synchronization of BatchNorm running variables is done correctly.
+If not, the test sometimes fails - depending on the timing.
+"""
+ctx = mx.test_utils.default_context()
+
+for variable in ['running_var', 'running_mean']:
+for _ in range(20):
+layer = nn.BatchNorm()
+layer.initialize(ctx=ctx)
+for _ in range(3):
+data = mx.nd.random.normal(loc=10, scale=2, shape=(1, 3, 10, 
10), ctx=ctx)
+with mx.autograd.record():
+out = layer(data)
+out.backward()
+
+# check if each read give the same value
+var1 = getattr(layer, variable).data().asnumpy()
+for _ in range(10):
+var2 = getattr(layer, variable).data().asnumpy()
+if (var1 != var2).any():
+raise AssertionError("Two consecutive reads of " + 
variable + " give different results")
+
+
+@with_seed()
 def test_batchnorm():
 layer = nn.BatchNorm(in_channels=10)
 check_layer_forward(layer, (2, 10, 10, 10))



svn commit: r40320 - in /dev/incubator/mxnet/1.7.0.rc0: ./ apache-mxnet-src-1.7.0.rc0-incubating.tar.gz apache-mxnet-src-1.7.0.rc0-incubating.tar.gz.asc apache-mxnet-src-1.7.0.rc0-incubating.tar.gz.sha512

2020-07-06 Thread taolv
Author: taolv
Date: Mon Jul  6 13:26:45 2020
New Revision: 40320

Log:
Add mxnet-1.7.0.rc0

Added:
dev/incubator/mxnet/1.7.0.rc0/
dev/incubator/mxnet/1.7.0.rc0/apache-mxnet-src-1.7.0.rc0-incubating.tar.gz  
 (with props)

dev/incubator/mxnet/1.7.0.rc0/apache-mxnet-src-1.7.0.rc0-incubating.tar.gz.asc  
 (with props)

dev/incubator/mxnet/1.7.0.rc0/apache-mxnet-src-1.7.0.rc0-incubating.tar.gz.sha512

Added: 
dev/incubator/mxnet/1.7.0.rc0/apache-mxnet-src-1.7.0.rc0-incubating.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/incubator/mxnet/1.7.0.rc0/apache-mxnet-src-1.7.0.rc0-incubating.tar.gz
--
svn:mime-type = application/x-gzip

Added: 
dev/incubator/mxnet/1.7.0.rc0/apache-mxnet-src-1.7.0.rc0-incubating.tar.gz.asc
==
Binary file - no diff available.

Propchange: 
dev/incubator/mxnet/1.7.0.rc0/apache-mxnet-src-1.7.0.rc0-incubating.tar.gz.asc
--
svn:mime-type = application/pgp-signature

Added: 
dev/incubator/mxnet/1.7.0.rc0/apache-mxnet-src-1.7.0.rc0-incubating.tar.gz.sha512
==
--- 
dev/incubator/mxnet/1.7.0.rc0/apache-mxnet-src-1.7.0.rc0-incubating.tar.gz.sha512
 (added)
+++ 
dev/incubator/mxnet/1.7.0.rc0/apache-mxnet-src-1.7.0.rc0-incubating.tar.gz.sha512
 Mon Jul  6 13:26:45 2020
@@ -0,0 +1 @@
+67401ff5d0ed3e84cbf82e7d2903caea07c21627c1dcf7af708a74b8e85ecb1c36df37bf20c2aa1666bbe71dcae855b5c3e7863de6abccd3a89516c5e3ea58da
  apache-mxnet-src-1.7.0.rc0-incubating.tar.gz




[incubator-mxnet] tag 1.7.0.rc0 created (now 477affe)

2020-07-06 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to tag 1.7.0.rc0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


  at 477affe  (commit)
No new revisions were added by this update.



[incubator-mxnet] tag 1.7.0.rc0 created (now 477affe)

2020-07-06 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to tag 1.7.0.rc0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


  at 477affe  (commit)
No new revisions were added by this update.



[incubator-mxnet] tag 1.7.0.rc0 created (now 477affe)

2020-07-06 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to tag 1.7.0.rc0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


  at 477affe  (commit)
No new revisions were added by this update.



[incubator-mxnet] branch v1.7.x updated: Increase staggered build timeout to 180 min (#18568) (#18585)

2020-07-02 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new c2ddd81  Increase staggered build timeout to 180 min (#18568) (#18585)
c2ddd81 is described below

commit c2ddd81fb82e3e0aa6d57410fa0193580aaf6d0c
Author: Joe Evans 
AuthorDate: Thu Jul 2 07:49:00 2020 -0700

Increase staggered build timeout to 180 min (#18568) (#18585)

* Increase staggered build timeout to 180 min, since sanity build has 180 
min timeout.

* Decrease timeout so everyone is happy.

Co-authored-by: Joe Evans 

Co-authored-by: Joe Evans 
Co-authored-by: Tao Lv 
---
 ci/jenkins/Jenkinsfile_full   | 2 +-
 ci/jenkins/Jenkinsfile_sanity | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ci/jenkins/Jenkinsfile_full b/ci/jenkins/Jenkinsfile_full
index 33d57d2..415bd7b 100644
--- a/ci/jenkins/Jenkinsfile_full
+++ b/ci/jenkins/Jenkinsfile_full
@@ -21,7 +21,7 @@
 // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
 
 // timeout in minutes
-def max_time = 30
+def max_time = 60
 
 def buildJobs = [
 'centos-cpu',
diff --git a/ci/jenkins/Jenkinsfile_sanity b/ci/jenkins/Jenkinsfile_sanity
index ed4d16e..065202c 100644
--- a/ci/jenkins/Jenkinsfile_sanity
+++ b/ci/jenkins/Jenkinsfile_sanity
@@ -21,7 +21,7 @@
 // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
 
 // timeout in minutes
-max_time = 180
+max_time = 60
 
 node('utility') {
   // Loading the utilities requires a node context unfortunately



[incubator-mxnet] branch v1.7.x updated: Increase staggered build timeout to 180 min (#18568) (#18585)

2020-07-02 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new c2ddd81  Increase staggered build timeout to 180 min (#18568) (#18585)
c2ddd81 is described below

commit c2ddd81fb82e3e0aa6d57410fa0193580aaf6d0c
Author: Joe Evans 
AuthorDate: Thu Jul 2 07:49:00 2020 -0700

Increase staggered build timeout to 180 min (#18568) (#18585)

* Increase staggered build timeout to 180 min, since sanity build has 180 
min timeout.

* Decrease timeout so everyone is happy.

Co-authored-by: Joe Evans 

Co-authored-by: Joe Evans 
Co-authored-by: Tao Lv 
---
 ci/jenkins/Jenkinsfile_full   | 2 +-
 ci/jenkins/Jenkinsfile_sanity | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ci/jenkins/Jenkinsfile_full b/ci/jenkins/Jenkinsfile_full
index 33d57d2..415bd7b 100644
--- a/ci/jenkins/Jenkinsfile_full
+++ b/ci/jenkins/Jenkinsfile_full
@@ -21,7 +21,7 @@
 // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
 
 // timeout in minutes
-def max_time = 30
+def max_time = 60
 
 def buildJobs = [
 'centos-cpu',
diff --git a/ci/jenkins/Jenkinsfile_sanity b/ci/jenkins/Jenkinsfile_sanity
index ed4d16e..065202c 100644
--- a/ci/jenkins/Jenkinsfile_sanity
+++ b/ci/jenkins/Jenkinsfile_sanity
@@ -21,7 +21,7 @@
 // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
 
 // timeout in minutes
-max_time = 180
+max_time = 60
 
 node('utility') {
   // Loading the utilities requires a node context unfortunately



[incubator-mxnet] branch v1.7.x updated: Increase staggered build timeout to 180 min (#18568) (#18585)

2020-07-02 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new c2ddd81  Increase staggered build timeout to 180 min (#18568) (#18585)
c2ddd81 is described below

commit c2ddd81fb82e3e0aa6d57410fa0193580aaf6d0c
Author: Joe Evans 
AuthorDate: Thu Jul 2 07:49:00 2020 -0700

Increase staggered build timeout to 180 min (#18568) (#18585)

* Increase staggered build timeout to 180 min, since sanity build has 180 
min timeout.

* Decrease timeout so everyone is happy.

Co-authored-by: Joe Evans 

Co-authored-by: Joe Evans 
Co-authored-by: Tao Lv 
---
 ci/jenkins/Jenkinsfile_full   | 2 +-
 ci/jenkins/Jenkinsfile_sanity | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ci/jenkins/Jenkinsfile_full b/ci/jenkins/Jenkinsfile_full
index 33d57d2..415bd7b 100644
--- a/ci/jenkins/Jenkinsfile_full
+++ b/ci/jenkins/Jenkinsfile_full
@@ -21,7 +21,7 @@
 // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
 
 // timeout in minutes
-def max_time = 30
+def max_time = 60
 
 def buildJobs = [
 'centos-cpu',
diff --git a/ci/jenkins/Jenkinsfile_sanity b/ci/jenkins/Jenkinsfile_sanity
index ed4d16e..065202c 100644
--- a/ci/jenkins/Jenkinsfile_sanity
+++ b/ci/jenkins/Jenkinsfile_sanity
@@ -21,7 +21,7 @@
 // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
 
 // timeout in minutes
-max_time = 180
+max_time = 60
 
 node('utility') {
   // Loading the utilities requires a node context unfortunately



[incubator-mxnet] branch v1.7.x updated: [v1.7.x] Backport PRs of numpy features (#18653)

2020-07-01 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 802e5af  [v1.7.x] Backport PRs of numpy features (#18653)
802e5af is described below

commit 802e5af5d0133af88c3c166f6e4fe99508eff42b
Author: Xingjian Shi 
AuthorDate: Wed Jul 1 22:41:24 2020 -0700

[v1.7.x] Backport PRs of numpy features (#18653)

* add zero grad for npi_unique (#18080)

* fix np.clip scalar input case (#17788)

* fix true_divide (#18393)

Co-authored-by: Hao Jin 
Co-authored-by: Xi Wang 
---
 python/mxnet/numpy/multiarray.py|  5 +
 src/operator/numpy/np_true_divide-inl.h | 16 
 src/operator/numpy/np_unique_op.cc  |  1 +
 tests/python/unittest/test_numpy_op.py  | 12 +++-
 4 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py
index fceaaf3..9a803d4 100644
--- a/python/mxnet/numpy/multiarray.py
+++ b/python/mxnet/numpy/multiarray.py
@@ -6174,6 +6174,11 @@ def clip(a, a_min, a_max, out=None):
 >>> np.clip(a, 3, 6, out=a)
 array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)
 """
+from numbers import Number
+if isinstance(a, Number):
+# In case input is a scalar, the computation would fall back to native 
numpy.
+# The value returned would be a python scalar.
+return _np.clip(a, a_min, a_max, out=None)
 return _mx_nd_np.clip(a, a_min, a_max, out=out)
 
 
diff --git a/src/operator/numpy/np_true_divide-inl.h 
b/src/operator/numpy/np_true_divide-inl.h
index 0bc60a0..be2ce51 100644
--- a/src/operator/numpy/np_true_divide-inl.h
+++ b/src/operator/numpy/np_true_divide-inl.h
@@ -121,7 +121,7 @@ void TrueDivideElemwiseCompute(const nnvm::NodeAttrs& attrs,
 // Case when types of the 2 input tensors are different
 if (common::is_float(lhs.type_flag_) && common::is_float(rhs.type_flag_)) {
   // both lhs and rhs are float types, output type is the more precise one
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 } else if (common::is_float(lhs.type_flag_) || 
common::is_float(rhs.type_flag_)) {
   // one is float type, the other is integer type, the output type should 
be the same as float
   CHECK_EQ(out.type_flag_,
@@ -150,14 +150,14 @@ void TrueDivideElemwiseCompute(const nnvm::NodeAttrs& attrs,
   }
 } else {
   // lhs is integer type, rhs is integer type, output type should be float
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 }
 #else
 // Windows case: using temp space for casting the type
 // Case when types of the 2 input tensors are different
 if (common::is_float(lhs.type_flag_) && common::is_float(rhs.type_flag_)) {
   // both lhs and rhs are float types, output type is the more precise one
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 } else if (common::is_float(lhs.type_flag_) || 
common::is_float(rhs.type_flag_)) {
   // lhs is float type, rhs is integer type, the output type should be the 
same as lhs
   CHECK_EQ(out.type_flag_,
@@ -187,7 +187,7 @@ void TrueDivideElemwiseCompute(const nnvm::NodeAttrs& attrs,
   }
 } else {
   // lhs is integer type, rhs is integer type, output type should be float
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 }
 #endif
   }
@@ -241,7 +241,7 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& 
attrs,
   } else {
 if (common::is_float(lhs.type_flag_) && 
common::is_float(rhs.type_flag_)) {
   // lhs and rhs have different float types, the output is the more 
precise one
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 } else if (common::is_float(lhs.type_flag_) || 
common::is_float(rhs.type_flag_)) {
   // one of lhs and rhs is float, the output is the same type as the 
float one
   if (common::is_float(lhs.type_flag_)) {
@@ -269,7 +269,7 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& 
attrs,
   }
 } else {
   // lhs and rhs have different integer types, the output is float type
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 }
   }
 });
@@ -302,7 +302,7 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& 
attrs,
 } else {
   if (common::is_float(lhs.type_flag_

[incubator-mxnet] branch v1.7.x updated: [v1.7.x] Backport PRs of numpy features (#18653)

2020-07-01 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 802e5af  [v1.7.x] Backport PRs of numpy features (#18653)
802e5af is described below

commit 802e5af5d0133af88c3c166f6e4fe99508eff42b
Author: Xingjian Shi 
AuthorDate: Wed Jul 1 22:41:24 2020 -0700

[v1.7.x] Backport PRs of numpy features (#18653)

* add zero grad for npi_unique (#18080)

* fix np.clip scalar input case (#17788)

* fix true_divide (#18393)

Co-authored-by: Hao Jin 
Co-authored-by: Xi Wang 
---
 python/mxnet/numpy/multiarray.py|  5 +
 src/operator/numpy/np_true_divide-inl.h | 16 
 src/operator/numpy/np_unique_op.cc  |  1 +
 tests/python/unittest/test_numpy_op.py  | 12 +++-
 4 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py
index fceaaf3..9a803d4 100644
--- a/python/mxnet/numpy/multiarray.py
+++ b/python/mxnet/numpy/multiarray.py
@@ -6174,6 +6174,11 @@ def clip(a, a_min, a_max, out=None):
 >>> np.clip(a, 3, 6, out=a)
 array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)
 """
+from numbers import Number
+if isinstance(a, Number):
+# In case input is a scalar, the computation would fall back to native 
numpy.
+# The value returned would be a python scalar.
+return _np.clip(a, a_min, a_max, out=None)
 return _mx_nd_np.clip(a, a_min, a_max, out=out)
 
 
diff --git a/src/operator/numpy/np_true_divide-inl.h 
b/src/operator/numpy/np_true_divide-inl.h
index 0bc60a0..be2ce51 100644
--- a/src/operator/numpy/np_true_divide-inl.h
+++ b/src/operator/numpy/np_true_divide-inl.h
@@ -121,7 +121,7 @@ void TrueDivideElemwiseCompute(const nnvm::NodeAttrs& attrs,
 // Case when types of the 2 input tensors are different
 if (common::is_float(lhs.type_flag_) && common::is_float(rhs.type_flag_)) {
   // both lhs and rhs are float types, output type is the more precise one
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 } else if (common::is_float(lhs.type_flag_) || 
common::is_float(rhs.type_flag_)) {
   // one is float type, the other is integer type, the output type should 
be the same as float
   CHECK_EQ(out.type_flag_,
@@ -150,14 +150,14 @@ void TrueDivideElemwiseCompute(const nnvm::NodeAttrs& attrs,
   }
 } else {
   // lhs is integer type, rhs is integer type, output type should be float
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 }
 #else
 // Windows case: using temp space for casting the type
 // Case when types of the 2 input tensors are different
 if (common::is_float(lhs.type_flag_) && common::is_float(rhs.type_flag_)) {
   // both lhs and rhs are float types, output type is the more precise one
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 } else if (common::is_float(lhs.type_flag_) || 
common::is_float(rhs.type_flag_)) {
   // lhs is float type, rhs is integer type, the output type should be the 
same as lhs
   CHECK_EQ(out.type_flag_,
@@ -187,7 +187,7 @@ void TrueDivideElemwiseCompute(const nnvm::NodeAttrs& attrs,
   }
 } else {
   // lhs is integer type, rhs is integer type, output type should be float
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 }
 #endif
   }
@@ -241,7 +241,7 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& 
attrs,
   } else {
 if (common::is_float(lhs.type_flag_) && 
common::is_float(rhs.type_flag_)) {
   // lhs and rhs have different float types, the output is the more 
precise one
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 } else if (common::is_float(lhs.type_flag_) || 
common::is_float(rhs.type_flag_)) {
   // one of lhs and rhs is float, the output is the same type as the 
float one
   if (common::is_float(lhs.type_flag_)) {
@@ -269,7 +269,7 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& 
attrs,
   }
 } else {
   // lhs and rhs have different integer types, the output is float type
-  LOG(ERROR) << "not implemented yet...";
+  LOG(FATAL) << "not implemented yet...";
 }
   }
 });
@@ -302,7 +302,7 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& 
attrs,
 } else {
   if (common::is_float(lhs.type_flag_

[incubator-mxnet] branch v1.6.x updated: [v1.6] Fix the monitor_callback invalid issue during calibration with variable input shapes (#18632)

2020-07-01 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.6.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.6.x by this push:
 new e503704  [v1.6] Fix the monitor_callback invalid issue during 
calibration with variable input shapes (#18632)
e503704 is described below

commit e503704bb13bd1873572cf081a7566416f72b7b8
Author: ciyong 
AuthorDate: Thu Jul 2 13:20:29 2020 +0800

[v1.6] Fix the monitor_callback invalid issue during calibration with 
variable input shapes (#18632)

* Fix the monitor_callback invalid issue during calibration with variable 
input shapes

* retrigger CI

* Add UT for monitor check and disable codecov
---
 .codecov.yml   |  3 ++
 python/mxnet/executor.py   |  9 ++
 tests/python/unittest/test_operator.py | 55 +-
 3 files changed, 66 insertions(+), 1 deletion(-)

diff --git a/.codecov.yml b/.codecov.yml
index 97624c2..70037e6 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -4,6 +4,9 @@ codecov:
 require_ci_to_pass: yes
 
 coverage:
+  status:
+project: off
+patch: off
   precision: 2
   round: down
   range: "70...100"
diff --git a/python/mxnet/executor.py b/python/mxnet/executor.py
index 3b79f0c..19ef5bb 100644
--- a/python/mxnet/executor.py
+++ b/python/mxnet/executor.py
@@ -79,6 +79,7 @@ class Executor(object):
 self._aux_dict = None
 self._output_dict = None
 self._monitor_callback = None
+self._monitor_all = None
 self._ctx = copy.deepcopy(ctx)
 self._grad_req = copy.deepcopy(grad_req)
 self._group2ctx = copy.deepcopy(group2ctx)
@@ -253,6 +254,7 @@ class Executor(object):
 """
 cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, 
ctypes.c_void_p)
 self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
+self._monitor_all = monitor_all
 check_call(_LIB.MXExecutorSetMonitorCallbackEX(
 self.handle,
 self._monitor_callback,
@@ -469,6 +471,13 @@ class Executor(object):
 executor.arg_arrays = arg_arrays
 executor.grad_arrays = grad_arrays
 executor.aux_arrays = aux_arrays
+if (self._monitor_callback is not None) and (self._monitor_all is not 
None):
+# rebind callback to the new executor if the callback is valid
+check_call(_LIB.MXExecutorSetMonitorCallbackEX(
+handle,
+self._monitor_callback,
+None,
+ctypes.c_int(self._monitor_all)))
 return executor
 
 def debug_str(self):
diff --git a/tests/python/unittest/test_operator.py 
b/tests/python/unittest/test_operator.py
index 37f7376..39fd16d 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -8270,6 +8270,59 @@ def test_op_all_names_monitor():
 del os.environ['MXNET_SUBGRAPH_BACKEND']
 
 @with_seed()
+def test_monitor_with_variable_input_shape():
+output = {}
+
+def get_output_min_callback(name, arr):
+name = py_str(name)
+handle = ctypes.cast(arr, NDArrayHandle)
+arr = NDArray(handle, writable=False)
+min_val = mx.ndarray.min(arr).asscalar()
+if name in output:
+output[name] = min(output[name], min_val)
+else:
+output[name] = min_val
+
+def check_result(output, names):
+assert len(output) > 0
+for k, v in output.items():
+assert k in names
+assert v is not None
+
+is_windows = sys.platform.startswith('win')
+if (is_windows):
+# Windows doesn't support set environment variable on the fly, so 
disable it for now
+pass
+else:
+# Disable subgraph in case subgraph will replace symbol
+os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
+
+batch_size = 1
+op_name = 'conv'
+dshape = (batch_size, 3, 10, 10)
+data = mx.sym.Variable('data', shape=dshape)
+sym = mx.sym.Convolution(data, kernel=(1, 1), num_filter=1, 
name=op_name)
+
+mod = mx.module.Module(symbol=sym, label_names=None)
+mod.bind(for_training=False, data_shapes=[('data', dshape)])
+mod.init_params()
+mod._exec_group.execs[0].set_monitor_callback(get_output_min_callback, 
monitor_all=True)
+
+new_dshape = dshape[:-1] + (dshape[-1] + 4,)
+new_data = mx.nd.random.uniform(shape=new_dshape)
+new_data = mx.io.NDArrayIter(data=new_data, batch_size=batch_size)
+new_data = DummyIter(new_data)
+
+for batch in new_data:
+mod.forward(data_batch=batch, is_train=False)
+mx.nd.waitall()
+break
+
+name_list = ['data', 'conv_data', 'conv_weight', 'conv_bias', 
'conv_output']
+

[incubator-mxnet] branch v1.6.x updated: [v1.6] Fix the monitor_callback invalid issue during calibration with variable input shapes (#18632)

2020-07-01 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.6.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.6.x by this push:
 new e503704  [v1.6] Fix the monitor_callback invalid issue during 
calibration with variable input shapes (#18632)
e503704 is described below

commit e503704bb13bd1873572cf081a7566416f72b7b8
Author: ciyong 
AuthorDate: Thu Jul 2 13:20:29 2020 +0800

[v1.6] Fix the monitor_callback invalid issue during calibration with 
variable input shapes (#18632)

* Fix the monitor_callback invalid issue during calibration with variable 
input shapes

* retrigger CI

* Add UT for monitor check and disable codecov
---
 .codecov.yml   |  3 ++
 python/mxnet/executor.py   |  9 ++
 tests/python/unittest/test_operator.py | 55 +-
 3 files changed, 66 insertions(+), 1 deletion(-)

diff --git a/.codecov.yml b/.codecov.yml
index 97624c2..70037e6 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -4,6 +4,9 @@ codecov:
 require_ci_to_pass: yes
 
 coverage:
+  status:
+project: off
+patch: off
   precision: 2
   round: down
   range: "70...100"
diff --git a/python/mxnet/executor.py b/python/mxnet/executor.py
index 3b79f0c..19ef5bb 100644
--- a/python/mxnet/executor.py
+++ b/python/mxnet/executor.py
@@ -79,6 +79,7 @@ class Executor(object):
 self._aux_dict = None
 self._output_dict = None
 self._monitor_callback = None
+self._monitor_all = None
 self._ctx = copy.deepcopy(ctx)
 self._grad_req = copy.deepcopy(grad_req)
 self._group2ctx = copy.deepcopy(group2ctx)
@@ -253,6 +254,7 @@ class Executor(object):
 """
 cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, 
ctypes.c_void_p)
 self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
+self._monitor_all = monitor_all
 check_call(_LIB.MXExecutorSetMonitorCallbackEX(
 self.handle,
 self._monitor_callback,
@@ -469,6 +471,13 @@ class Executor(object):
 executor.arg_arrays = arg_arrays
 executor.grad_arrays = grad_arrays
 executor.aux_arrays = aux_arrays
+if (self._monitor_callback is not None) and (self._monitor_all is not 
None):
+# rebind callback to the new executor if the callback is valid
+check_call(_LIB.MXExecutorSetMonitorCallbackEX(
+handle,
+self._monitor_callback,
+None,
+ctypes.c_int(self._monitor_all)))
 return executor
 
 def debug_str(self):
diff --git a/tests/python/unittest/test_operator.py 
b/tests/python/unittest/test_operator.py
index 37f7376..39fd16d 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -8270,6 +8270,59 @@ def test_op_all_names_monitor():
 del os.environ['MXNET_SUBGRAPH_BACKEND']
 
 @with_seed()
+def test_monitor_with_variable_input_shape():
+output = {}
+
+def get_output_min_callback(name, arr):
+name = py_str(name)
+handle = ctypes.cast(arr, NDArrayHandle)
+arr = NDArray(handle, writable=False)
+min_val = mx.ndarray.min(arr).asscalar()
+if name in output:
+output[name] = min(output[name], min_val)
+else:
+output[name] = min_val
+
+def check_result(output, names):
+assert len(output) > 0
+for k, v in output.items():
+assert k in names
+assert v is not None
+
+is_windows = sys.platform.startswith('win')
+if (is_windows):
+# Windows doesn't support set environment variable on the fly, so 
disable it for now
+pass
+else:
+# Disable subgraph in case subgraph will replace symbol
+os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
+
+batch_size = 1
+op_name = 'conv'
+dshape = (batch_size, 3, 10, 10)
+data = mx.sym.Variable('data', shape=dshape)
+sym = mx.sym.Convolution(data, kernel=(1, 1), num_filter=1, 
name=op_name)
+
+mod = mx.module.Module(symbol=sym, label_names=None)
+mod.bind(for_training=False, data_shapes=[('data', dshape)])
+mod.init_params()
+mod._exec_group.execs[0].set_monitor_callback(get_output_min_callback, 
monitor_all=True)
+
+new_dshape = dshape[:-1] + (dshape[-1] + 4,)
+new_data = mx.nd.random.uniform(shape=new_dshape)
+new_data = mx.io.NDArrayIter(data=new_data, batch_size=batch_size)
+new_data = DummyIter(new_data)
+
+for batch in new_data:
+mod.forward(data_batch=batch, is_train=False)
+mx.nd.waitall()
+break
+
+name_list = ['data', 'conv_data', 'conv_weight', 'conv_bias', 
'conv_output']
+

[incubator-mxnet] branch v1.6.x updated: [v1.6] Fix the monitor_callback invalid issue during calibration with variable input shapes (#18632)

2020-07-01 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.6.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.6.x by this push:
 new e503704  [v1.6] Fix the monitor_callback invalid issue during 
calibration with variable input shapes (#18632)
e503704 is described below

commit e503704bb13bd1873572cf081a7566416f72b7b8
Author: ciyong 
AuthorDate: Thu Jul 2 13:20:29 2020 +0800

[v1.6] Fix the monitor_callback invalid issue during calibration with 
variable input shapes (#18632)

* Fix the monitor_callback invalid issue during calibration with variable 
input shapes

* retrigger CI

* Add UT for monitor check and disable codecov
---
 .codecov.yml   |  3 ++
 python/mxnet/executor.py   |  9 ++
 tests/python/unittest/test_operator.py | 55 +-
 3 files changed, 66 insertions(+), 1 deletion(-)

diff --git a/.codecov.yml b/.codecov.yml
index 97624c2..70037e6 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -4,6 +4,9 @@ codecov:
 require_ci_to_pass: yes
 
 coverage:
+  status:
+project: off
+patch: off
   precision: 2
   round: down
   range: "70...100"
diff --git a/python/mxnet/executor.py b/python/mxnet/executor.py
index 3b79f0c..19ef5bb 100644
--- a/python/mxnet/executor.py
+++ b/python/mxnet/executor.py
@@ -79,6 +79,7 @@ class Executor(object):
 self._aux_dict = None
 self._output_dict = None
 self._monitor_callback = None
+self._monitor_all = None
 self._ctx = copy.deepcopy(ctx)
 self._grad_req = copy.deepcopy(grad_req)
 self._group2ctx = copy.deepcopy(group2ctx)
@@ -253,6 +254,7 @@ class Executor(object):
 """
 cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, 
ctypes.c_void_p)
 self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
+self._monitor_all = monitor_all
 check_call(_LIB.MXExecutorSetMonitorCallbackEX(
 self.handle,
 self._monitor_callback,
@@ -469,6 +471,13 @@ class Executor(object):
 executor.arg_arrays = arg_arrays
 executor.grad_arrays = grad_arrays
 executor.aux_arrays = aux_arrays
+if (self._monitor_callback is not None) and (self._monitor_all is not 
None):
+# rebind callback to the new executor if the callback is valid
+check_call(_LIB.MXExecutorSetMonitorCallbackEX(
+handle,
+self._monitor_callback,
+None,
+ctypes.c_int(self._monitor_all)))
 return executor
 
 def debug_str(self):
diff --git a/tests/python/unittest/test_operator.py 
b/tests/python/unittest/test_operator.py
index 37f7376..39fd16d 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -8270,6 +8270,59 @@ def test_op_all_names_monitor():
 del os.environ['MXNET_SUBGRAPH_BACKEND']
 
 @with_seed()
+def test_monitor_with_variable_input_shape():
+output = {}
+
+def get_output_min_callback(name, arr):
+name = py_str(name)
+handle = ctypes.cast(arr, NDArrayHandle)
+arr = NDArray(handle, writable=False)
+min_val = mx.ndarray.min(arr).asscalar()
+if name in output:
+output[name] = min(output[name], min_val)
+else:
+output[name] = min_val
+
+def check_result(output, names):
+assert len(output) > 0
+for k, v in output.items():
+assert k in names
+assert v is not None
+
+is_windows = sys.platform.startswith('win')
+if (is_windows):
+# Windows doesn't support set environment variable on the fly, so 
disable it for now
+pass
+else:
+# Disable subgraph in case subgraph will replace symbol
+os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
+
+batch_size = 1
+op_name = 'conv'
+dshape = (batch_size, 3, 10, 10)
+data = mx.sym.Variable('data', shape=dshape)
+sym = mx.sym.Convolution(data, kernel=(1, 1), num_filter=1, 
name=op_name)
+
+mod = mx.module.Module(symbol=sym, label_names=None)
+mod.bind(for_training=False, data_shapes=[('data', dshape)])
+mod.init_params()
+mod._exec_group.execs[0].set_monitor_callback(get_output_min_callback, 
monitor_all=True)
+
+new_dshape = dshape[:-1] + (dshape[-1] + 4,)
+new_data = mx.nd.random.uniform(shape=new_dshape)
+new_data = mx.io.NDArrayIter(data=new_data, batch_size=batch_size)
+new_data = DummyIter(new_data)
+
+for batch in new_data:
+mod.forward(data_batch=batch, is_train=False)
+mx.nd.waitall()
+break
+
+name_list = ['data', 'conv_data', 'conv_weight', 'conv_bias', 
'conv_output']
+

[incubator-mxnet] branch v1.6.x updated: [v1.6] Fix the monitor_callback invalid issue during calibration with variable input shapes (#18632)

2020-07-01 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.6.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.6.x by this push:
 new e503704  [v1.6] Fix the monitor_callback invalid issue during 
calibration with variable input shapes (#18632)
e503704 is described below

commit e503704bb13bd1873572cf081a7566416f72b7b8
Author: ciyong 
AuthorDate: Thu Jul 2 13:20:29 2020 +0800

[v1.6] Fix the monitor_callback invalid issue during calibration with 
variable input shapes (#18632)

* Fix the monitor_callback invalid issue during calibration with variable 
input shapes

* retrigger CI

* Add UT for monitor check and disable codecov
---
 .codecov.yml   |  3 ++
 python/mxnet/executor.py   |  9 ++
 tests/python/unittest/test_operator.py | 55 +-
 3 files changed, 66 insertions(+), 1 deletion(-)

diff --git a/.codecov.yml b/.codecov.yml
index 97624c2..70037e6 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -4,6 +4,9 @@ codecov:
 require_ci_to_pass: yes
 
 coverage:
+  status:
+project: off
+patch: off
   precision: 2
   round: down
   range: "70...100"
diff --git a/python/mxnet/executor.py b/python/mxnet/executor.py
index 3b79f0c..19ef5bb 100644
--- a/python/mxnet/executor.py
+++ b/python/mxnet/executor.py
@@ -79,6 +79,7 @@ class Executor(object):
 self._aux_dict = None
 self._output_dict = None
 self._monitor_callback = None
+self._monitor_all = None
 self._ctx = copy.deepcopy(ctx)
 self._grad_req = copy.deepcopy(grad_req)
 self._group2ctx = copy.deepcopy(group2ctx)
@@ -253,6 +254,7 @@ class Executor(object):
 """
 cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, 
ctypes.c_void_p)
 self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
+self._monitor_all = monitor_all
 check_call(_LIB.MXExecutorSetMonitorCallbackEX(
 self.handle,
 self._monitor_callback,
@@ -469,6 +471,13 @@ class Executor(object):
 executor.arg_arrays = arg_arrays
 executor.grad_arrays = grad_arrays
 executor.aux_arrays = aux_arrays
+if (self._monitor_callback is not None) and (self._monitor_all is not 
None):
+# rebind callback to the new executor if the callback is valid
+check_call(_LIB.MXExecutorSetMonitorCallbackEX(
+handle,
+self._monitor_callback,
+None,
+ctypes.c_int(self._monitor_all)))
 return executor
 
 def debug_str(self):
diff --git a/tests/python/unittest/test_operator.py 
b/tests/python/unittest/test_operator.py
index 37f7376..39fd16d 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -8270,6 +8270,59 @@ def test_op_all_names_monitor():
 del os.environ['MXNET_SUBGRAPH_BACKEND']
 
 @with_seed()
+def test_monitor_with_variable_input_shape():
+output = {}
+
+def get_output_min_callback(name, arr):
+name = py_str(name)
+handle = ctypes.cast(arr, NDArrayHandle)
+arr = NDArray(handle, writable=False)
+min_val = mx.ndarray.min(arr).asscalar()
+if name in output:
+output[name] = min(output[name], min_val)
+else:
+output[name] = min_val
+
+def check_result(output, names):
+assert len(output) > 0
+for k, v in output.items():
+assert k in names
+assert v is not None
+
+is_windows = sys.platform.startswith('win')
+if (is_windows):
+# Windows doesn't support set environment variable on the fly, so 
disable it for now
+pass
+else:
+# Disable subgraph in case subgraph will replace symbol
+os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
+
+batch_size = 1
+op_name = 'conv'
+dshape = (batch_size, 3, 10, 10)
+data = mx.sym.Variable('data', shape=dshape)
+sym = mx.sym.Convolution(data, kernel=(1, 1), num_filter=1, 
name=op_name)
+
+mod = mx.module.Module(symbol=sym, label_names=None)
+mod.bind(for_training=False, data_shapes=[('data', dshape)])
+mod.init_params()
+mod._exec_group.execs[0].set_monitor_callback(get_output_min_callback, 
monitor_all=True)
+
+new_dshape = dshape[:-1] + (dshape[-1] + 4,)
+new_data = mx.nd.random.uniform(shape=new_dshape)
+new_data = mx.io.NDArrayIter(data=new_data, batch_size=batch_size)
+new_data = DummyIter(new_data)
+
+for batch in new_data:
+mod.forward(data_batch=batch, is_train=False)
+mx.nd.waitall()
+break
+
+name_list = ['data', 'conv_data', 'conv_weight', 'conv_bias', 
'conv_output']
+

[incubator-mxnet] branch master updated: Fix BatchNorm backward synchronization (#18644)

2020-07-01 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
 new 37bed6e  Fix BatchNorm backward synchronization (#18644)
37bed6e is described below

commit 37bed6e3af794624d651e888101eceb30c27c001
Author: Andrzej Kotłowski 
AuthorDate: Wed Jul 1 16:39:22 2020 +0200

Fix BatchNorm backward synchronization (#18644)

* Add test for BatchNorm running variables synchronization

* Fix BatchNorm backward synchronization

It fixes issue #18610
---
 src/operator/nn/batch_norm.cc   |  3 +++
 tests/python/unittest/test_gluon.py | 26 ++
 2 files changed, 29 insertions(+)

diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index 8dbd271..7e540ca 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -653,6 +653,9 @@ then set ``gamma`` to 1 and its gradient to 0.
 NNVM_REGISTER_OP(_backward_BatchNorm)
 .set_num_inputs(8)
 .set_num_outputs(3)
+.set_attr<nnvm::FMutateInputs>("FMutateInputs", [](const nnvm::NodeAttrs& attrs) {
+  return std::vector<uint32_t>{6, 7};   // moving_mean, moving_var
+})
 .set_attr<nnvm::TIsBackward>("TIsBackward", true)
 .set_attr<FInferStorageType>("FInferStorageType", BatchNormStorageType)
#if MXNET_USE_MKLDNN == 1
 #if MXNET_USE_MKLDNN == 1
diff --git a/tests/python/unittest/test_gluon.py 
b/tests/python/unittest/test_gluon.py
index 47ef86f..77d5119 100644
--- a/tests/python/unittest/test_gluon.py
+++ b/tests/python/unittest/test_gluon.py
@@ -666,6 +666,32 @@ def test_pool():
 
 
 @with_seed()
+@pytest.mark.parametrize('variable', ['running_var', 'running_mean'])
+def test_batchnorm_backward_synchronization(variable):
+"""
+Tests if synchronization of BatchNorm running variables is done correctly.
+If not, the test sometimes fails - depending on the timing.
+"""
+ctx = mx.test_utils.default_context()
+
+for _ in range(20):
+layer = nn.BatchNorm()
+layer.initialize(ctx=ctx)
+for _ in range(3):
+data = mx.nd.random.normal(loc=10, scale=2, shape=(1, 3, 10, 10), 
ctx=ctx)
+with mx.autograd.record():
+out = layer(data)
+out.backward()
+
+# check if each read give the same value
+var1 = getattr(layer, variable).data().asnumpy()
+for _ in range(10):
+var2 = getattr(layer, variable).data().asnumpy()
+if (var1 != var2).any():
+raise AssertionError("Two consecutive reads of " + variable + 
" give different results")
+
+
+@with_seed()
 def test_batchnorm():
 layer = nn.BatchNorm(in_channels=10)
 check_layer_forward(layer, (2, 10, 10, 10))



[incubator-mxnet] branch master updated: Fix BatchNorm backward synchronization (#18644)

2020-07-01 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
 new 37bed6e  Fix BatchNorm backward synchronization (#18644)
37bed6e is described below

commit 37bed6e3af794624d651e888101eceb30c27c001
Author: Andrzej Kotłowski 
AuthorDate: Wed Jul 1 16:39:22 2020 +0200

Fix BatchNorm backward synchronization (#18644)

* Add test for BatchNorm running variables synchronization

* Fix BatchNorm backward synchronization

It fixes issue #18610
---
 src/operator/nn/batch_norm.cc   |  3 +++
 tests/python/unittest/test_gluon.py | 26 ++
 2 files changed, 29 insertions(+)

diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index 8dbd271..7e540ca 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -653,6 +653,9 @@ then set ``gamma`` to 1 and its gradient to 0.
 NNVM_REGISTER_OP(_backward_BatchNorm)
 .set_num_inputs(8)
 .set_num_outputs(3)
+.set_attr<nnvm::FMutateInputs>("FMutateInputs", [](const nnvm::NodeAttrs& attrs) {
+  return std::vector<uint32_t>{6, 7};   // moving_mean, moving_var
+})
 .set_attr<nnvm::TIsBackward>("TIsBackward", true)
 .set_attr<FInferStorageType>("FInferStorageType", BatchNormStorageType)
#if MXNET_USE_MKLDNN == 1
 #if MXNET_USE_MKLDNN == 1
diff --git a/tests/python/unittest/test_gluon.py 
b/tests/python/unittest/test_gluon.py
index 47ef86f..77d5119 100644
--- a/tests/python/unittest/test_gluon.py
+++ b/tests/python/unittest/test_gluon.py
@@ -666,6 +666,32 @@ def test_pool():
 
 
 @with_seed()
+@pytest.mark.parametrize('variable', ['running_var', 'running_mean'])
+def test_batchnorm_backward_synchronization(variable):
+"""
+Tests if synchronization of BatchNorm running variables is done correctly.
+If not, the test sometimes fails - depending on the timing.
+"""
+ctx = mx.test_utils.default_context()
+
+for _ in range(20):
+layer = nn.BatchNorm()
+layer.initialize(ctx=ctx)
+for _ in range(3):
+data = mx.nd.random.normal(loc=10, scale=2, shape=(1, 3, 10, 10), 
ctx=ctx)
+with mx.autograd.record():
+out = layer(data)
+out.backward()
+
+# check if each read give the same value
+var1 = getattr(layer, variable).data().asnumpy()
+for _ in range(10):
+var2 = getattr(layer, variable).data().asnumpy()
+if (var1 != var2).any():
+raise AssertionError("Two consecutive reads of " + variable + 
" give different results")
+
+
+@with_seed()
 def test_batchnorm():
 layer = nn.BatchNorm(in_channels=10)
 check_layer_forward(layer, (2, 10, 10, 10))



svn commit: r40058 - /dev/incubator/mxnet/KEYS

2020-06-17 Thread taolv
Author: taolv
Date: Wed Jun 17 07:14:55 2020
New Revision: 40058

Log:
update keys file for Ciyong Chen

Modified:
dev/incubator/mxnet/KEYS

Modified: dev/incubator/mxnet/KEYS
==
--- dev/incubator/mxnet/KEYS (original)
+++ dev/incubator/mxnet/KEYS Wed Jun 17 07:14:55 2020
@@ -982,3 +982,61 @@ RC170UEAUfOS7AvNBUIjaA2yVHecufA/a5pJO8ka
 G5Kq0VNdNBAZJNvKNAqRb+kDQuEm/D+HbiVxI9dWdIvL
 =m8Mt
 -END PGP PUBLIC KEY BLOCK-
+pub   4096R/DF20B044 2020-06-16
+uid  Ciyong Chen (CODE SIGNING KEY) 
+sig 3DF20B044 2020-06-16  Ciyong Chen (CODE SIGNING KEY) 

+sub   4096R/E918632E 2020-06-16
+sig  DF20B044 2020-06-16  Ciyong Chen (CODE SIGNING KEY) 

+
+-BEGIN PGP PUBLIC KEY BLOCK-
+Version: GnuPG v2.0.22 (GNU/Linux)
+
+mQINBF7ojeABEADX4yGle/XEyjFhDYDAXklF1B+anNycuZONa7lLPGmvEz4Tn5Kt
+KuQMT/LAUagz4ZoHMLWBmVHW/Ri7PVILV7LRovxNPKZyTKVdh2wdGmQTwew437Mh
++VDkZqv7b8tSgp+0ITHl/yeBGO1iiDf/epNA8llChNwj0SyV9pmlN0fSIAf1HNdV
+gyn+wVSEPdCBzNLPZUJo52qx0RH4uUl1alNR58w3QzJcXIxl92bBR33OnX5VOhkw
+sKtDc5E5cT9wPYF0mEA/lGJGStoSjz4JnLzP86RxkmGKCagNxqSvxpTeQB1mDFY8
+iNVkapYYkgliGqQ/dXfKwf6iyifsxZxKFYvwbDqOPjwx6cRHjC5Caz+snYtD5bsB
+4HEtXg3ISbNNWwlP182nnGJRyoo/FrASlgVkzwRP+ocTSMKPAGKsyHF1WQhFLuWZ
+O2HFFxTgcCHEZMoNiF62dcDYEsaO0oDUCGuwvCwU//x8sxv+UY4UqZfiLUN2CxnL
+3xTM5MrO1TVV4R6YtqAHSBD2cxB1mayd3O7YW5P2v72ctK6mmjDIY9WFqLjuZJvn
+HO6j8iKHRhqQA2qRj19m4jH28+IiUOeL0qTl0ZGq7PEFHOEkq1b0uwpw1HDuISi0
+HXdlwKs/1hVwv66HVKVBz0FJNWC+ij5l9de3H7mc/xio2nDzU/7iFT3gUwARAQAB
+tDdDaXlvbmcgQ2hlbiAoQ09ERSBTSUdOSU5HIEtFWSkgPGNpeW9uZy5jaGVuQGFw
+YWNoZS5vcmc+iQI3BBMBCgAhBQJe6I3gAhsDBQsJCAcDBRUKCQgLBRYCAwEAAh4B
+AheAAAoJELa/hk/fILBEZIEP/1MjsnXm/cN79daGmFO+EBspLF72dX6J+qgdvgZd
+HLG96d/tB2WmV6WKyEFfHZEbvoND25hXylJOXc85lszaaGHfZN1Yg/kCTjkLHIP0
+wKx7yAGVliYj/yDI67wkJewk9zjQfo3ns9S71rwML09+8PShvSmOK/eAnz607vfx
+Y9U4AHkSErp3xA1+X11M7n3mUy2+Hto6ioTZNk30+S+GL9DbkmEOzSNseziI01HP
+KbFYq2SYwn7gaDnbnuppaxDar2JBm9lAUvNo2EQtKEqNnXnfS6UJ1H/fhDXyiHto
+BWo5kk1c+RyZwk3fUP9clyCH6Mq1PFnFE8JwM0lUhTY3EGTI5AI1G2ML8rfhEpYU
+KYQr6fa+V1g7Tghc1w441j1WQs0akgj3iPuQJTot2e67qwz59U2cMO+ZP8RHFAHO
+FCcskpK4fHCY7X8CkutAJnV4j8KCaR9bxr2mjQ5N5OmAyIqSfARucShA40sqbfIQ
+WjnH/u+dZMA42x1yAn9PUOvyGok4zovD7x6Uvb7fww70QBe7nwgNKdAYd0CyLapt
+UCsfWfco9pKBZf0kL/2c5zwxowObYDuOYO6wjFNfF+oC/aaHgbfmEg2bles2aYyi
+dgbcAuzY/U5HXmVDhD2S57UpJoLEa2LPyUsZiyYUWC2boirNB9kZVd0lmQAOiPwR
+mK2TuQINBF7ojeABEADQuZwV6+mZIyb+GvvPNOXkzZ555QGaYdXpclG3t186bSIw
+euP1GmEIMnKz1SKLKaGZdoqr13I44dSIOYBuixwJITsHc0p38OYsW93cjdvE/SQ6
+o+PW2/4DQgRYbEMoDM0heOpgTgwvrvFWxli6oy0QU/lmsIc1jsA+PH6XmuEsxLRj
+J6SGZC3P3px6w+KIY9XSub24daDxNGMsd0+f4PNd6brLJpehMas0we10jTDjcqPA
+WqV6Lh9+/JH+VN65jRHYdV+zOz+oNx0U6mjjRmhFu7hWooCH1C6y75dE00L31D9c
+H+PAPvWUWwodwzODjEmpdFwtaOCoM8XSIXQ+4EO57fkyIEkM3xildW+ISU4sI2yP
+9ANCjFr4OH8/bp1lJCfNCn3dB/KUuYIDGVA/4BGYS4pOqy4fvmWiG705q1kYP5f+
+Di0DS/JS97Jtn5uD2sKiqKv4uj3LrkphJ1Ek896Vr0uQ30w176fDLE7Ru+ioXmeL
+CD7KNaSe8s1eVnaFdjIXgo4ELh8h4frDzoSxQU6omf6QIuld15TaeQjeGIckfkAE
+JRrnxM88C10gfdpiLAebBsXLRx+DKFYjj8vkw/fsx7R1s9Fl1EjofaoI0D9/GvmS
+meFfJBkwZzzwyV6AFcQY+OiqZ4bZ0rlUdU4t+yioB9jgYymQtsxju/P8E0xoaQAR
+AQABiQIfBBgBCgAJBQJe6I3gAhsMAAoJELa/hk/fILBEW90P/ioPSCtu93AytJIZ
+vOb7NWe9B/IA8nn/nsUhYuowLRb7zGrcowDF3Sp7MhWVGGlD9VN4bm35u4OuK0Uj
+6eyotrVGrEEaHoZbRK/wd3ZuLXM4C+SslDQ6DDz5dKmBuazLkCRSM4QbrnWc9Dy9
+8mg4LhAGf+DeNypNuKJVWLxHMNy9/7C7R0ikiaQzIztXe/AvVPMXXlFq7m32lPkJ
+Ek69AqW+cNRneOxG0y2qNf8YhuQA0WPrUAYaOV6z0KprDjcVBMqPEHGuASsNz4pJ
+tJtMC/HADstnsA60SsgPtmXR3gKntd5Zuapd2s8koqrzVyOardKwyCgggW4TfzO/
+ZAxPGB+Fhy6iHWtsNpfz4S5AFfjY3j/TqQSbDvyd7gl2gcWVgL5TzOsK9PpCKR4O
+/tsdnUnN6efdVjlDM4BlylbRLIxKMQ/wAXGxJo2i/blJPeIMicgRNQsgN/v4GKny
+lecb5CJWE/kNPasRBXC4eJElO1hE6iQ7N4Si5tyMDx9Ypt8sYP2ZTQY18ZB0JiYp
+ljW9AqQU1dUSRhNFGtrrzRG+eC0CQ8PaEl+K0kGxiUFFb1ocSvzdvc0BAH4s+zJU
+AQAs0D5H4b+5HBUfG8J4Tz9H5Af5D6qHxOIAiNr83wAXht6O3aoi6a0L38fckxUW
+0QaeS4PIKmE5b8gFJ83tOE+T0Hnm
+=80Bp
+-END PGP PUBLIC KEY BLOCK-




[incubator-mxnet] branch v1.x updated: Julia: remove downloading of the non-ASF binary build (#18489) (#18503)

2020-06-08 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
 new de481d3  Julia: remove downloading of the non-ASF binary build 
(#18489) (#18503)
de481d3 is described below

commit de481d3fdd266fb14b1673fa6fb653d3da029b2e
Author: ciyong 
AuthorDate: Mon Jun 8 22:13:40 2020 +0800

Julia: remove downloading of the non-ASF binary build (#18489) (#18503)
---
 julia/deps/build.jl | 35 +--
 1 file changed, 1 insertion(+), 34 deletions(-)

diff --git a/julia/deps/build.jl b/julia/deps/build.jl
index a79d2a0..5666993 100644
--- a/julia/deps/build.jl
+++ b/julia/deps/build.jl
@@ -111,40 +111,7 @@ using BinDeps
 @BinDeps.setup
 if !libmxnet_detected
   if Sys.iswindows()
-if Sys.ARCH != :x86_64
-  @info("Prebuilt windows binaries are only available on 64bit. You will 
have to built MXNet yourself.")
-  return
-end
-@info("Downloading pre-built packages for Windows.")
-base_url = 
"https://github.com/yajiedesign/mxnet/releases/download/weekly_binary_build_v2/prebuildbase_win10_x64_vc14_v2.7z;
-
-if libmxnet_curr_ver == "master"
-  _cmd = "{
-[System.Net.ServicePointManager]::SecurityProtocol='tls12';
-Invoke-WebRequest -Uri 
'https://api.github.com/repos/yajiedesign/mxnet/releases/latest'
--OutFile 'mxnet.json'}"
-  # download_cmd uses powershell 2, but we need powershell 3 to do this
-  run(`powershell -NoProfile -Command $_cmd`)
-  curr_win = JSON.parsefile("mxnet.json")["tag_name"]
-  @info("Can't use MXNet master on Windows, using latest binaries from 
$curr_win.")
-end
-# TODO: Get url from JSON.
-# TODO: detect cuda version and select corresponding url.
-name = "mxnet_x64_$(HAS_CUDA ? "vc141_gpu_cu101" : "vc14_cpu").7z"
-package_url = 
"https://github.com/yajiedesign/mxnet/releases/download/$(curr_win)/$(curr_win)_$(name)"
-
-exe7z = joinpath(Sys.BINDIR, "7z.exe")
-
-run(download_cmd(package_url, "mxnet.7z"))
-# this command will create the dir "usr\\lib"
-run(`$exe7z e mxnet.7z *\\build\\* *\\lib\\* -y -ousr\\lib`)
-
-run(download_cmd(base_url, "mxnet_base.7z"))
-run(`$exe7z x mxnet_base.7z -y -ousr`)
-run(`cmd /c copy 
"usr\\prebuildbase_win10_x64_vc14_v2\\3rdparty\\bin\\*.dll" "usr\\lib"`)
-
-# testing
-run(`cmd /c dir "usr\\lib"`)
+@info("Prebuilt windows binaries is not available currently. You will have 
to built MXNet yourself.")
 return
   end
 



[incubator-mxnet] branch v1.x updated: Julia: remove downloading of the non-ASF binary build (#18489) (#18503)

2020-06-08 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
 new de481d3  Julia: remove downloading of the non-ASF binary build 
(#18489) (#18503)
de481d3 is described below

commit de481d3fdd266fb14b1673fa6fb653d3da029b2e
Author: ciyong 
AuthorDate: Mon Jun 8 22:13:40 2020 +0800

Julia: remove downloading of the non-ASF binary build (#18489) (#18503)
---
 julia/deps/build.jl | 35 +--
 1 file changed, 1 insertion(+), 34 deletions(-)

diff --git a/julia/deps/build.jl b/julia/deps/build.jl
index a79d2a0..5666993 100644
--- a/julia/deps/build.jl
+++ b/julia/deps/build.jl
@@ -111,40 +111,7 @@ using BinDeps
 @BinDeps.setup
 if !libmxnet_detected
   if Sys.iswindows()
-if Sys.ARCH != :x86_64
-  @info("Prebuilt windows binaries are only available on 64bit. You will 
have to built MXNet yourself.")
-  return
-end
-@info("Downloading pre-built packages for Windows.")
-base_url = 
"https://github.com/yajiedesign/mxnet/releases/download/weekly_binary_build_v2/prebuildbase_win10_x64_vc14_v2.7z;
-
-if libmxnet_curr_ver == "master"
-  _cmd = "{
-[System.Net.ServicePointManager]::SecurityProtocol='tls12';
-Invoke-WebRequest -Uri 
'https://api.github.com/repos/yajiedesign/mxnet/releases/latest'
--OutFile 'mxnet.json'}"
-  # download_cmd uses powershell 2, but we need powershell 3 to do this
-  run(`powershell -NoProfile -Command $_cmd`)
-  curr_win = JSON.parsefile("mxnet.json")["tag_name"]
-  @info("Can't use MXNet master on Windows, using latest binaries from 
$curr_win.")
-end
-# TODO: Get url from JSON.
-# TODO: detect cuda version and select corresponding url.
-name = "mxnet_x64_$(HAS_CUDA ? "vc141_gpu_cu101" : "vc14_cpu").7z"
-package_url = 
"https://github.com/yajiedesign/mxnet/releases/download/$(curr_win)/$(curr_win)_$(name)"
-
-exe7z = joinpath(Sys.BINDIR, "7z.exe")
-
-run(download_cmd(package_url, "mxnet.7z"))
-# this command will create the dir "usr\\lib"
-run(`$exe7z e mxnet.7z *\\build\\* *\\lib\\* -y -ousr\\lib`)
-
-run(download_cmd(base_url, "mxnet_base.7z"))
-run(`$exe7z x mxnet_base.7z -y -ousr`)
-run(`cmd /c copy 
"usr\\prebuildbase_win10_x64_vc14_v2\\3rdparty\\bin\\*.dll" "usr\\lib"`)
-
-# testing
-run(`cmd /c dir "usr\\lib"`)
+@info("Prebuilt windows binaries is not available currently. You will have 
to built MXNet yourself.")
 return
   end
 



[incubator-mxnet] branch v1.x updated: more support for boolean indexing and assign (#18351)

2020-05-27 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
 new 0c6785f  more support for boolean indexing and assign (#18351)
0c6785f is described below

commit 0c6785fe66201dfba340feac6766a93018fedce8
Author: alicia <32725332+alicia1...@users.noreply.github.com>
AuthorDate: Thu May 28 11:07:24 2020 +0800

more support for boolean indexing and assign (#18351)
---
 python/mxnet/ndarray/ndarray.py | 100 
 python/mxnet/numpy/multiarray.py| 230 
 src/operator/numpy/np_nonzero_op.cc |   2 +-
 src/operator/tensor/indexing_op.cc  |   2 +-
 src/operator/tensor/indexing_op.cu  |   2 +-
 src/operator/tensor/indexing_op.h   |   4 +-
 tests/python/unittest/test_numpy_ndarray.py |  36 -
 7 files changed, 206 insertions(+), 170 deletions(-)

diff --git a/python/mxnet/ndarray/ndarray.py b/python/mxnet/ndarray/ndarray.py
index cda3166..7ac666e 100644
--- a/python/mxnet/ndarray/ndarray.py
+++ b/python/mxnet/ndarray/ndarray.py
@@ -39,6 +39,7 @@ from ..base import mx_uint, NDArrayHandle, check_call, 
DLPackHandle, mx_int, mx_
 from ..base import ctypes2buffer
 from ..runtime import Features
 from ..context import Context, current_context
+from ..util import is_np_array
 from . import _internal
 from . import op
 from ._internal import NDArrayBase
@@ -111,7 +112,11 @@ _NDARRAY_UNSUPPORTED_INDEXING = -1
 _NDARRAY_BASIC_INDEXING = 0
 _NDARRAY_ADVANCED_INDEXING = 1
 _NDARRAY_EMPTY_TUPLE_INDEXING = 2
-_NDARRAY_BOOLEAN_INDEXING = 3
+
+# Return code for 0-d boolean array handler
+_NDARRAY_NO_ZERO_DIM_BOOL_ARRAY = -1
+_NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE = 0
+_NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE = 1
 
 # Caching whether MXNet was built with INT64 support or not
 _INT64_TENSOR_SIZE_ENABLED = None
@@ -521,7 +526,7 @@ fixed-size items.
 return
 
 else:
-key = indexing_key_expand_implicit_axes(key, self.shape)
+key, _ = indexing_key_expand_implicit_axes(key, self.shape)
 slc_key = tuple(idx for idx in key if idx is not None)
 
 if len(slc_key) < self.ndim:
@@ -714,7 +719,7 @@ fixed-size items.
 elif key.step == 0:
 raise ValueError("slice step cannot be zero")
 
-key = indexing_key_expand_implicit_axes(key, self.shape)
+key, _ = indexing_key_expand_implicit_axes(key, self.shape)
 if len(key) == 0:
 raise ValueError('indexing key cannot be an empty tuple')
 
@@ -2574,9 +2579,12 @@ fixed-size items.
 >>> type(x.asscalar())
 
 """
-if self.shape != (1,):
+if self.size != 1:
 raise ValueError("The current array is not a scalar")
-return self.asnumpy()[0]
+if self.ndim == 1:
+return self.asnumpy()[0]
+else:
+return self.asnumpy()[()]
 
 def astype(self, dtype, copy=True):
 """Returns a copy of the array after casting to a specified type.
@@ -2943,9 +2951,23 @@ fixed-size items.
 lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self
 )
 
+def check_boolean_array_dimension(array_shape, axis, bool_shape):
+"""
+Advanced boolean indexing is implemented through the use of `nonzero`.
+Size check is necessary to make sure that the boolean array
+has exactly as many dimensions as it is supposed to work with before the 
conversion
+"""
+for i, val in enumerate(bool_shape):
+if array_shape[axis + i] != val:
+raise IndexError('boolean index did not match indexed array along 
axis {};'
+ ' size is {} but corresponding boolean size is {}'
+ .format(axis + i, array_shape[axis + i], val))
 
 def indexing_key_expand_implicit_axes(key, shape):
-"""Make implicit axes explicit by adding ``slice(None)``.
+"""
+Make implicit axes explicit by adding ``slice(None)``
+and convert boolean array to integer array through `nonzero`.
+
 Examples
 
 >>> shape = (3, 4, 5)
@@ -2957,6 +2979,11 @@ def indexing_key_expand_implicit_axes(key, shape):
 (0, slice(None, None, None), slice(None, None, None))
 >>> indexing_key_expand_implicit_axes(np.s_[:2, None, 0, ...], shape)
 (slice(None, 2, None), None, 0, slice(None, None, None))
+>>> bool_array = np.array([[True, False, True, False],
+   [False, True, False, True],
+   [True, False, True, False]], dtype=np.bool)
+>>> indexing_key_expand_implicit_axes(np.s_[bool_array, None, 0:2], shape)
+(ar

[incubator-mxnet] branch v1.x updated: more support for boolean indexing and assign (#18351)

2020-05-27 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
 new 0c6785f  more support for boolean indexing and assign (#18351)
0c6785f is described below

commit 0c6785fe66201dfba340feac6766a93018fedce8
Author: alicia <32725332+alicia1...@users.noreply.github.com>
AuthorDate: Thu May 28 11:07:24 2020 +0800

more support for boolean indexing and assign (#18351)
---
 python/mxnet/ndarray/ndarray.py | 100 
 python/mxnet/numpy/multiarray.py| 230 
 src/operator/numpy/np_nonzero_op.cc |   2 +-
 src/operator/tensor/indexing_op.cc  |   2 +-
 src/operator/tensor/indexing_op.cu  |   2 +-
 src/operator/tensor/indexing_op.h   |   4 +-
 tests/python/unittest/test_numpy_ndarray.py |  36 -
 7 files changed, 206 insertions(+), 170 deletions(-)

diff --git a/python/mxnet/ndarray/ndarray.py b/python/mxnet/ndarray/ndarray.py
index cda3166..7ac666e 100644
--- a/python/mxnet/ndarray/ndarray.py
+++ b/python/mxnet/ndarray/ndarray.py
@@ -39,6 +39,7 @@ from ..base import mx_uint, NDArrayHandle, check_call, 
DLPackHandle, mx_int, mx_
 from ..base import ctypes2buffer
 from ..runtime import Features
 from ..context import Context, current_context
+from ..util import is_np_array
 from . import _internal
 from . import op
 from ._internal import NDArrayBase
@@ -111,7 +112,11 @@ _NDARRAY_UNSUPPORTED_INDEXING = -1
 _NDARRAY_BASIC_INDEXING = 0
 _NDARRAY_ADVANCED_INDEXING = 1
 _NDARRAY_EMPTY_TUPLE_INDEXING = 2
-_NDARRAY_BOOLEAN_INDEXING = 3
+
+# Return code for 0-d boolean array handler
+_NDARRAY_NO_ZERO_DIM_BOOL_ARRAY = -1
+_NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE = 0
+_NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE = 1
 
 # Caching whether MXNet was built with INT64 support or not
 _INT64_TENSOR_SIZE_ENABLED = None
@@ -521,7 +526,7 @@ fixed-size items.
 return
 
 else:
-key = indexing_key_expand_implicit_axes(key, self.shape)
+key, _ = indexing_key_expand_implicit_axes(key, self.shape)
 slc_key = tuple(idx for idx in key if idx is not None)
 
 if len(slc_key) < self.ndim:
@@ -714,7 +719,7 @@ fixed-size items.
 elif key.step == 0:
 raise ValueError("slice step cannot be zero")
 
-key = indexing_key_expand_implicit_axes(key, self.shape)
+key, _ = indexing_key_expand_implicit_axes(key, self.shape)
 if len(key) == 0:
 raise ValueError('indexing key cannot be an empty tuple')
 
@@ -2574,9 +2579,12 @@ fixed-size items.
 >>> type(x.asscalar())
 
 """
-if self.shape != (1,):
+if self.size != 1:
 raise ValueError("The current array is not a scalar")
-return self.asnumpy()[0]
+if self.ndim == 1:
+return self.asnumpy()[0]
+else:
+return self.asnumpy()[()]
 
 def astype(self, dtype, copy=True):
 """Returns a copy of the array after casting to a specified type.
@@ -2943,9 +2951,23 @@ fixed-size items.
 lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self
 )
 
+def check_boolean_array_dimension(array_shape, axis, bool_shape):
+"""
+Advanced boolean indexing is implemented through the use of `nonzero`.
+Size check is necessary to make sure that the boolean array
+has exactly as many dimensions as it is supposed to work with before the 
conversion
+"""
+for i, val in enumerate(bool_shape):
+if array_shape[axis + i] != val:
+raise IndexError('boolean index did not match indexed array along 
axis {};'
+ ' size is {} but corresponding boolean size is {}'
+ .format(axis + i, array_shape[axis + i], val))
 
 def indexing_key_expand_implicit_axes(key, shape):
-"""Make implicit axes explicit by adding ``slice(None)``.
+"""
+Make implicit axes explicit by adding ``slice(None)``
+and convert boolean array to integer array through `nonzero`.
+
 Examples
 
 >>> shape = (3, 4, 5)
@@ -2957,6 +2979,11 @@ def indexing_key_expand_implicit_axes(key, shape):
 (0, slice(None, None, None), slice(None, None, None))
 >>> indexing_key_expand_implicit_axes(np.s_[:2, None, 0, ...], shape)
 (slice(None, 2, None), None, 0, slice(None, None, None))
+>>> bool_array = np.array([[True, False, True, False],
+   [False, True, False, True],
+   [True, False, True, False]], dtype=np.bool)
+>>> indexing_key_expand_implicit_axes(np.s_[bool_array, None, 0:2], shape)
+(ar

[incubator-mxnet] branch v1.7.x updated: more support for boolean indexing and assign (#18352)

2020-05-27 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 75ab155  more support for boolean indexing and assign (#18352)
75ab155 is described below

commit 75ab15569bd0f20a90806ce2fc38df08be208ed7
Author: alicia <32725332+alicia1...@users.noreply.github.com>
AuthorDate: Thu May 28 11:05:13 2020 +0800

more support for boolean indexing and assign (#18352)
---
 python/mxnet/ndarray/ndarray.py | 100 
 python/mxnet/numpy/multiarray.py| 230 
 src/operator/numpy/np_nonzero_op.cc |   2 +-
 src/operator/tensor/indexing_op.cc  |   2 +-
 src/operator/tensor/indexing_op.cu  |   2 +-
 src/operator/tensor/indexing_op.h   |   4 +-
 tests/python/unittest/test_numpy_ndarray.py |  36 -
 7 files changed, 206 insertions(+), 170 deletions(-)

diff --git a/python/mxnet/ndarray/ndarray.py b/python/mxnet/ndarray/ndarray.py
index cda3166..7ac666e 100644
--- a/python/mxnet/ndarray/ndarray.py
+++ b/python/mxnet/ndarray/ndarray.py
@@ -39,6 +39,7 @@ from ..base import mx_uint, NDArrayHandle, check_call, 
DLPackHandle, mx_int, mx_
 from ..base import ctypes2buffer
 from ..runtime import Features
 from ..context import Context, current_context
+from ..util import is_np_array
 from . import _internal
 from . import op
 from ._internal import NDArrayBase
@@ -111,7 +112,11 @@ _NDARRAY_UNSUPPORTED_INDEXING = -1
 _NDARRAY_BASIC_INDEXING = 0
 _NDARRAY_ADVANCED_INDEXING = 1
 _NDARRAY_EMPTY_TUPLE_INDEXING = 2
-_NDARRAY_BOOLEAN_INDEXING = 3
+
+# Return code for 0-d boolean array handler
+_NDARRAY_NO_ZERO_DIM_BOOL_ARRAY = -1
+_NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE = 0
+_NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE = 1
 
 # Caching whether MXNet was built with INT64 support or not
 _INT64_TENSOR_SIZE_ENABLED = None
@@ -521,7 +526,7 @@ fixed-size items.
 return
 
 else:
-key = indexing_key_expand_implicit_axes(key, self.shape)
+key, _ = indexing_key_expand_implicit_axes(key, self.shape)
 slc_key = tuple(idx for idx in key if idx is not None)
 
 if len(slc_key) < self.ndim:
@@ -714,7 +719,7 @@ fixed-size items.
 elif key.step == 0:
 raise ValueError("slice step cannot be zero")
 
-key = indexing_key_expand_implicit_axes(key, self.shape)
+key, _ = indexing_key_expand_implicit_axes(key, self.shape)
 if len(key) == 0:
 raise ValueError('indexing key cannot be an empty tuple')
 
@@ -2574,9 +2579,12 @@ fixed-size items.
 >>> type(x.asscalar())
 
 """
-if self.shape != (1,):
+if self.size != 1:
 raise ValueError("The current array is not a scalar")
-return self.asnumpy()[0]
+if self.ndim == 1:
+return self.asnumpy()[0]
+else:
+return self.asnumpy()[()]
 
 def astype(self, dtype, copy=True):
 """Returns a copy of the array after casting to a specified type.
@@ -2943,9 +2951,23 @@ fixed-size items.
 lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self
 )
 
+def check_boolean_array_dimension(array_shape, axis, bool_shape):
+"""
+Advanced boolean indexing is implemented through the use of `nonzero`.
+Size check is necessary to make sure that the boolean array
+has exactly as many dimensions as it is supposed to work with before the 
conversion
+"""
+for i, val in enumerate(bool_shape):
+if array_shape[axis + i] != val:
+raise IndexError('boolean index did not match indexed array along 
axis {};'
+ ' size is {} but corresponding boolean size is {}'
+ .format(axis + i, array_shape[axis + i], val))
 
 def indexing_key_expand_implicit_axes(key, shape):
-"""Make implicit axes explicit by adding ``slice(None)``.
+"""
+Make implicit axes explicit by adding ``slice(None)``
+and convert boolean array to integer array through `nonzero`.
+
 Examples
 
 >>> shape = (3, 4, 5)
@@ -2957,6 +2979,11 @@ def indexing_key_expand_implicit_axes(key, shape):
 (0, slice(None, None, None), slice(None, None, None))
 >>> indexing_key_expand_implicit_axes(np.s_[:2, None, 0, ...], shape)
 (slice(None, 2, None), None, 0, slice(None, None, None))
+>>> bool_array = np.array([[True, False, True, False],
+   [False, True, False, True],
+   [True, False, True, False]], dtype=np.bool)
+>>> indexing_key_expand_implicit_axes(np.s_[bool_array, None, 0:2], 

[incubator-mxnet] branch v1.7.x updated: more support for boolean indexing and assign (#18352)

2020-05-27 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 75ab155  more support for boolean indexing and assign (#18352)
75ab155 is described below

commit 75ab15569bd0f20a90806ce2fc38df08be208ed7
Author: alicia <32725332+alicia1...@users.noreply.github.com>
AuthorDate: Thu May 28 11:05:13 2020 +0800

more support for boolean indexing and assign (#18352)
---
 python/mxnet/ndarray/ndarray.py | 100 
 python/mxnet/numpy/multiarray.py| 230 
 src/operator/numpy/np_nonzero_op.cc |   2 +-
 src/operator/tensor/indexing_op.cc  |   2 +-
 src/operator/tensor/indexing_op.cu  |   2 +-
 src/operator/tensor/indexing_op.h   |   4 +-
 tests/python/unittest/test_numpy_ndarray.py |  36 -
 7 files changed, 206 insertions(+), 170 deletions(-)

diff --git a/python/mxnet/ndarray/ndarray.py b/python/mxnet/ndarray/ndarray.py
index cda3166..7ac666e 100644
--- a/python/mxnet/ndarray/ndarray.py
+++ b/python/mxnet/ndarray/ndarray.py
@@ -39,6 +39,7 @@ from ..base import mx_uint, NDArrayHandle, check_call, 
DLPackHandle, mx_int, mx_
 from ..base import ctypes2buffer
 from ..runtime import Features
 from ..context import Context, current_context
+from ..util import is_np_array
 from . import _internal
 from . import op
 from ._internal import NDArrayBase
@@ -111,7 +112,11 @@ _NDARRAY_UNSUPPORTED_INDEXING = -1
 _NDARRAY_BASIC_INDEXING = 0
 _NDARRAY_ADVANCED_INDEXING = 1
 _NDARRAY_EMPTY_TUPLE_INDEXING = 2
-_NDARRAY_BOOLEAN_INDEXING = 3
+
+# Return code for 0-d boolean array handler
+_NDARRAY_NO_ZERO_DIM_BOOL_ARRAY = -1
+_NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE = 0
+_NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE = 1
 
 # Caching whether MXNet was built with INT64 support or not
 _INT64_TENSOR_SIZE_ENABLED = None
@@ -521,7 +526,7 @@ fixed-size items.
 return
 
 else:
-key = indexing_key_expand_implicit_axes(key, self.shape)
+key, _ = indexing_key_expand_implicit_axes(key, self.shape)
 slc_key = tuple(idx for idx in key if idx is not None)
 
 if len(slc_key) < self.ndim:
@@ -714,7 +719,7 @@ fixed-size items.
 elif key.step == 0:
 raise ValueError("slice step cannot be zero")
 
-key = indexing_key_expand_implicit_axes(key, self.shape)
+key, _ = indexing_key_expand_implicit_axes(key, self.shape)
 if len(key) == 0:
 raise ValueError('indexing key cannot be an empty tuple')
 
@@ -2574,9 +2579,12 @@ fixed-size items.
 >>> type(x.asscalar())
 
 """
-if self.shape != (1,):
+if self.size != 1:
 raise ValueError("The current array is not a scalar")
-return self.asnumpy()[0]
+if self.ndim == 1:
+return self.asnumpy()[0]
+else:
+return self.asnumpy()[()]
 
 def astype(self, dtype, copy=True):
 """Returns a copy of the array after casting to a specified type.
@@ -2943,9 +2951,23 @@ fixed-size items.
 lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self
 )
 
+def check_boolean_array_dimension(array_shape, axis, bool_shape):
+"""
+Advanced boolean indexing is implemented through the use of `nonzero`.
+Size check is necessary to make sure that the boolean array
+has exactly as many dimensions as it is supposed to work with before the 
conversion
+"""
+for i, val in enumerate(bool_shape):
+if array_shape[axis + i] != val:
+raise IndexError('boolean index did not match indexed array along 
axis {};'
+ ' size is {} but corresponding boolean size is {}'
+ .format(axis + i, array_shape[axis + i], val))
 
 def indexing_key_expand_implicit_axes(key, shape):
-"""Make implicit axes explicit by adding ``slice(None)``.
+"""
+Make implicit axes explicit by adding ``slice(None)``
+and convert boolean array to integer array through `nonzero`.
+
 Examples
 
 >>> shape = (3, 4, 5)
@@ -2957,6 +2979,11 @@ def indexing_key_expand_implicit_axes(key, shape):
 (0, slice(None, None, None), slice(None, None, None))
 >>> indexing_key_expand_implicit_axes(np.s_[:2, None, 0, ...], shape)
 (slice(None, 2, None), None, 0, slice(None, None, None))
+>>> bool_array = np.array([[True, False, True, False],
+   [False, True, False, True],
+   [True, False, True, False]], dtype=np.bool)
+>>> indexing_key_expand_implicit_axes(np.s_[bool_array, None, 0:2], 

[incubator-mxnet] branch v1.x updated (fe90008 -> b523527)

2020-05-26 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from fe90008  [v1.x] Backport edge pipeline (#18375)
 add b523527  Fix memory leaks in Gluon (#18328) (#18359)

No new revisions were added by this update.

Summary of changes:
 python/mxnet/gluon/block.py| 21 ++--
 tests/python/unittest/test_gluon.py| 39 ++
 tests/python/unittest/test_thread_local.py |  5 ++--
 3 files changed, 55 insertions(+), 10 deletions(-)



[incubator-mxnet] branch v1.x updated (fe90008 -> b523527)

2020-05-26 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from fe90008  [v1.x] Backport edge pipeline (#18375)
 add b523527  Fix memory leaks in Gluon (#18328) (#18359)

No new revisions were added by this update.

Summary of changes:
 python/mxnet/gluon/block.py| 21 ++--
 tests/python/unittest/test_gluon.py| 39 ++
 tests/python/unittest/test_thread_local.py |  5 ++--
 3 files changed, 55 insertions(+), 10 deletions(-)



[incubator-mxnet] branch v1.7.x updated: Fix memory leaks in Gluon (#18328) (#18358)

2020-05-18 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new c4d9270  Fix memory leaks in Gluon (#18328) (#18358)
c4d9270 is described below

commit c4d9270dde5c091386dbdbd53f8060a73b98cbc9
Author: Leonard Lausen 
AuthorDate: Mon May 18 18:51:45 2020 -0700

Fix memory leaks in Gluon (#18328) (#18358)

Fix leak of ndarray objects in the frontend due to reference cycle.

Backport of 3e676fc2c88bec75e4463c8fa9b5532664d518c2
---
 python/mxnet/gluon/block.py| 21 ++--
 tests/python/unittest/test_gluon.py| 39 ++
 tests/python/unittest/test_thread_local.py |  5 ++--
 3 files changed, 55 insertions(+), 10 deletions(-)

diff --git a/python/mxnet/gluon/block.py b/python/mxnet/gluon/block.py
index bed6679..968c787 100644
--- a/python/mxnet/gluon/block.py
+++ b/python/mxnet/gluon/block.py
@@ -23,8 +23,10 @@ __all__ = ['Block', 'HybridBlock', 'SymbolBlock']
 import threading
 import copy
 import warnings
-import re
+import weakref
 from collections import OrderedDict, defaultdict
+
+import re
 import numpy as np
 
 from ..base import mx_real_t, MXNetError
@@ -46,7 +48,7 @@ class _BlockScope(object):
 _current = threading.local()
 
 def __init__(self, block):
-self._block = block
+self._block = weakref.ref(block) if block is not None else None
 self._counter = {}
 self._old_scope = None
 self._name_scope = None
@@ -55,7 +57,8 @@ class _BlockScope(object):
 def create(prefix, params, hint):
 """Creates prefix and params for new `Block`."""
 current = getattr(_BlockScope._current, "value", None)
-if current is None:
+block = current._block() if current is not None else None
+if current is None or block is None:
 if prefix is None:
 if not hasattr(_name.NameManager._current, "value"):
 _name.NameManager._current.value = _name.NameManager()
@@ -71,23 +74,25 @@ class _BlockScope(object):
 prefix = '%s%d_'%(hint, count)
 current._counter[hint] = count + 1
 if params is None:
-parent = current._block.params
+parent = block.params
 params = ParameterDict(parent.prefix+prefix, parent._shared)
 else:
 params = ParameterDict(params.prefix, params)
-return current._block.prefix+prefix, params
+return block.prefix + prefix, params
 
 def __enter__(self):
-if self._block._empty_prefix:
+block = self._block()
+if block is None or block._empty_prefix:
 return self
 self._old_scope = getattr(_BlockScope._current, "value", None)
 _BlockScope._current.value = self
-self._name_scope = _name.Prefix(self._block.prefix)
+self._name_scope = _name.Prefix(block.prefix)
 self._name_scope.__enter__()
 return self
 
 def __exit__(self, ptype, value, trace):
-if self._block._empty_prefix:
+block = self._block()
+if block is None or block._empty_prefix:
 return
 self._name_scope.__exit__(ptype, value, trace)
 self._name_scope = None
diff --git a/tests/python/unittest/test_gluon.py 
b/tests/python/unittest/test_gluon.py
index a026825..cf6bc36 100644
--- a/tests/python/unittest/test_gluon.py
+++ b/tests/python/unittest/test_gluon.py
@@ -17,6 +17,7 @@
 
 import os
 import tempfile
+import gc
 
 import mxnet as mx
 from mxnet import gluon
@@ -3212,6 +3213,44 @@ def test_reqs_switching_training_inference():
 
 mx.test_utils.assert_almost_equal(grad1, grad2)
 
+def test_no_memory_leak_in_gluon():
+# Collect all other garbage prior to this test. Otherwise the test may fail
+# due to unrelated memory leaks.
+gc.collect()
+
+gc_flags = gc.get_debug()
+gc.set_debug(gc.DEBUG_SAVEALL)
+net = mx.gluon.nn.Dense(10, in_units=10)
+net.initialize()
+del net
+gc.collect()
+gc.set_debug(gc_flags)  # reset gc flags
+
+# Check for leaked NDArrays
+seen = set()
+def has_array(element):
+try:
+if element in seen:
+return False
+seen.add(element)
+except TypeError:  # unhashable
+pass
+
+if isinstance(element, mx.nd._internal.NDArrayBase):
+return True
+elif hasattr(element, '__dict__'):
+return any(has_array(x) for x in vars(element))
+elif isinstance(element, dict):
+return any(has_array(x) for x in element.items())
+else:
+try:
+return any(has_array(x) for x in element)
+except (TypeError, KeyError):
+return 

[incubator-mxnet] branch v1.7.x updated: fixing batch_norm and layer_norm for large tensors (#17805) (#18261)

2020-05-10 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new ceb0f06  fixing batch_norm and layer_norm for large tensors (#17805) 
(#18261)
ceb0f06 is described below

commit ceb0f06dfc656296f9b8b9fe5a8630a6f5dfae96
Author: Chaitanya Prakash Bapat 
AuthorDate: Sun May 10 19:16:15 2020 -0700

fixing batch_norm and layer_norm for large tensors (#17805) (#18261)

Co-authored-by: Rohit Kumar Srivastava 

Co-authored-by: Rohit Kumar Srivastava 
---
 src/operator/nn/batch_norm.cc | 2 +-
 src/operator/nn/layer_norm.cc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index 97acced..df03573 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -330,7 +330,7 @@ static bool BatchNormShape(const nnvm::NodeAttrs& attrs,
   : param.axis);
   CHECK_LT(channelAxis, dshape.ndim()) << "Channel axis out of range: " << 
param.axis;
 
-  const int channelCount = dshape[channelAxis];
+  const index_t channelCount = dshape[channelAxis];
 
   if (!mxnet::ndim_is_known(dshape)) {
 return false;
diff --git a/src/operator/nn/layer_norm.cc b/src/operator/nn/layer_norm.cc
index d385b93..e3d641a 100644
--- a/src/operator/nn/layer_norm.cc
+++ b/src/operator/nn/layer_norm.cc
@@ -47,7 +47,7 @@ static bool LayerNormShape(const nnvm::NodeAttrs& attrs,
   CHECK(axis >= 0 && axis < dshape.ndim())
 << "Channel axis out of range: axis=" << param.axis;
 
-  const int channelCount = dshape[axis];
+  const index_t channelCount = dshape[axis];
 
   if (!mxnet::ndim_is_known(dshape)) {
 return false;



[incubator-mxnet] branch v1.7.x updated: fixing batch_norm and layer_norm for large tensors (#17805) (#18261)

2020-05-10 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new ceb0f06  fixing batch_norm and layer_norm for large tensors (#17805) 
(#18261)
ceb0f06 is described below

commit ceb0f06dfc656296f9b8b9fe5a8630a6f5dfae96
Author: Chaitanya Prakash Bapat 
AuthorDate: Sun May 10 19:16:15 2020 -0700

fixing batch_norm and layer_norm for large tensors (#17805) (#18261)

Co-authored-by: Rohit Kumar Srivastava 

Co-authored-by: Rohit Kumar Srivastava 
---
 src/operator/nn/batch_norm.cc | 2 +-
 src/operator/nn/layer_norm.cc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index 97acced..df03573 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -330,7 +330,7 @@ static bool BatchNormShape(const nnvm::NodeAttrs& attrs,
   : param.axis);
   CHECK_LT(channelAxis, dshape.ndim()) << "Channel axis out of range: " << 
param.axis;
 
-  const int channelCount = dshape[channelAxis];
+  const index_t channelCount = dshape[channelAxis];
 
   if (!mxnet::ndim_is_known(dshape)) {
 return false;
diff --git a/src/operator/nn/layer_norm.cc b/src/operator/nn/layer_norm.cc
index d385b93..e3d641a 100644
--- a/src/operator/nn/layer_norm.cc
+++ b/src/operator/nn/layer_norm.cc
@@ -47,7 +47,7 @@ static bool LayerNormShape(const nnvm::NodeAttrs& attrs,
   CHECK(axis >= 0 && axis < dshape.ndim())
 << "Channel axis out of range: axis=" << param.axis;
 
-  const int channelCount = dshape[axis];
+  const index_t channelCount = dshape[axis];
 
   if (!mxnet::ndim_is_known(dshape)) {
 return false;



[incubator-mxnet] branch v1.7.x updated: add logic for no batch size while getting data arrays from executors (#17772) (#18122)

2020-04-23 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 8695537  add logic for no batch size while getting data arrays from 
executors (#17772) (#18122)
8695537 is described below

commit 86955370cd868b5d4f46f2f80f7632fd864773e3
Author: Manu Seth <22492939+mset...@users.noreply.github.com>
AuthorDate: Thu Apr 23 01:14:44 2020 -0700

add logic for no batch size while getting data arrays from executors 
(#17772) (#18122)

Co-authored-by: Ubuntu 

Co-authored-by: Ubuntu 
---
 python/mxnet/module/executor_group.py | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/python/mxnet/module/executor_group.py 
b/python/mxnet/module/executor_group.py
index d47665d..f2cb62f 100755
--- a/python/mxnet/module/executor_group.py
+++ b/python/mxnet/module/executor_group.py
@@ -308,8 +308,16 @@ class DataParallelExecutorGroup(object):
 def _collect_arrays(self):
 """Collect internal arrays from executors."""
 # convenient data structures
-self.data_arrays = [[(self.slices[i], e.arg_dict[name]) for i, e in 
enumerate(self.execs)]
-for name, _ in self.data_shapes]
+
+# check if self.slices is populated, if not then that means that there 
is no batch size
+if self.slices:
+# based on batch size, slice up data for the given contexts 
(self.execs)
+self.data_arrays = [[(self.slices[i], e.arg_dict[name]) for i, e 
in enumerate(self.execs)]
+for name, _ in self.data_shapes]
+else:
+# just use the context index as index into the data
+self.data_arrays = [[(slice(i, i+1), e.arg_dict[name]) for i, e in 
enumerate(self.execs)]
+for name, _ in self.data_shapes]
 
 self.state_arrays = [[e.arg_dict[name] for e in self.execs]
  for name in self.state_names]



[incubator-mxnet] branch v1.7.x updated: change error tolerance for bf16 bn (#18110)

2020-04-21 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 6e956fd  change error tolerance for bf16 bn (#18110)
6e956fd is described below

commit 6e956fd2b78fcd20e732a1f1d915da630ea1d999
Author: rongzha1 
AuthorDate: Wed Apr 22 11:17:17 2020 +0800

change error tolerance for bf16 bn (#18110)
---
 tests/python/mkl/test_bf16_operator.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/python/mkl/test_bf16_operator.py 
b/tests/python/mkl/test_bf16_operator.py
index e4f4a93..b275c96 100644
--- a/tests/python/mkl/test_bf16_operator.py
+++ b/tests/python/mkl/test_bf16_operator.py
@@ -126,8 +126,8 @@ def test_bf16_bn():
 bn_fp32 = mx.sym.BatchNorm(data_sym_fp32, **bn_params)
 
 bn_bf16 = mx.sym.BatchNorm(data_sym_bf16, **bn_params)
-check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28), bf16_use_fp32_params=True, etol=1e-3)
-check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, 
data_shape=(32, 16, 64, 64), bf16_use_fp32_params=True, etol=1e-3)
+check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28), bf16_use_fp32_params=True, etol=1e-2)
+check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, 
data_shape=(32, 16, 64, 64), bf16_use_fp32_params=True, etol=1e-2)
 
 @with_seed()
 def test_bf16_conv():
@@ -278,7 +278,7 @@ def test_bf16_fallback():
 bn_params = {"eps": 2e-05, "fix_gamma": False, "use_global_stats": True, 
"name": "bn"}
 bn_fp32 = mx.sym.BatchNorm(data_sym_fp32, **bn_params)
 bn_bf16=mx.sym.BatchNorm(data_sym_bf16, **bn_params)
-check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28, 3), bf16_use_fp32_params=True, etol=1e-3)
+check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28, 3), bf16_use_fp32_params=True, etol=1e-2)
 
 conv_params = {"kernel": (3, 3, 3), "num_filter": 128, "pad": (1, 1, 1), 
"stride": (1, 1, 1), "no_bias": True, "name": "conv"}
 conv_fp32 = mx.sym.Convolution(data_sym_fp32, **conv_params)



[incubator-mxnet] branch v1.7.x updated: change error tolerance for bf16 bn (#18110)

2020-04-21 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 6e956fd  change error tolerance for bf16 bn (#18110)
6e956fd is described below

commit 6e956fd2b78fcd20e732a1f1d915da630ea1d999
Author: rongzha1 
AuthorDate: Wed Apr 22 11:17:17 2020 +0800

change error tolerance for bf16 bn (#18110)
---
 tests/python/mkl/test_bf16_operator.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/python/mkl/test_bf16_operator.py 
b/tests/python/mkl/test_bf16_operator.py
index e4f4a93..b275c96 100644
--- a/tests/python/mkl/test_bf16_operator.py
+++ b/tests/python/mkl/test_bf16_operator.py
@@ -126,8 +126,8 @@ def test_bf16_bn():
 bn_fp32 = mx.sym.BatchNorm(data_sym_fp32, **bn_params)
 
 bn_bf16 = mx.sym.BatchNorm(data_sym_bf16, **bn_params)
-check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28), bf16_use_fp32_params=True, etol=1e-3)
-check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, 
data_shape=(32, 16, 64, 64), bf16_use_fp32_params=True, etol=1e-3)
+check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28), bf16_use_fp32_params=True, etol=1e-2)
+check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, 
data_shape=(32, 16, 64, 64), bf16_use_fp32_params=True, etol=1e-2)
 
 @with_seed()
 def test_bf16_conv():
@@ -278,7 +278,7 @@ def test_bf16_fallback():
 bn_params = {"eps": 2e-05, "fix_gamma": False, "use_global_stats": True, 
"name": "bn"}
 bn_fp32 = mx.sym.BatchNorm(data_sym_fp32, **bn_params)
 bn_bf16=mx.sym.BatchNorm(data_sym_bf16, **bn_params)
-check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28, 3), bf16_use_fp32_params=True, etol=1e-3)
+check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28, 3), bf16_use_fp32_params=True, etol=1e-2)
 
 conv_params = {"kernel": (3, 3, 3), "num_filter": 128, "pad": (1, 1, 1), 
"stride": (1, 1, 1), "no_bias": True, "name": "conv"}
 conv_fp32 = mx.sym.Convolution(data_sym_fp32, **conv_params)



[incubator-mxnet] branch v1.7.x updated: change error tolerance for bf16 bn (#18110)

2020-04-21 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
 new 6e956fd  change error tolerance for bf16 bn (#18110)
6e956fd is described below

commit 6e956fd2b78fcd20e732a1f1d915da630ea1d999
Author: rongzha1 
AuthorDate: Wed Apr 22 11:17:17 2020 +0800

change error tolerance for bf16 bn (#18110)
---
 tests/python/mkl/test_bf16_operator.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/python/mkl/test_bf16_operator.py 
b/tests/python/mkl/test_bf16_operator.py
index e4f4a93..b275c96 100644
--- a/tests/python/mkl/test_bf16_operator.py
+++ b/tests/python/mkl/test_bf16_operator.py
@@ -126,8 +126,8 @@ def test_bf16_bn():
 bn_fp32 = mx.sym.BatchNorm(data_sym_fp32, **bn_params)
 
 bn_bf16 = mx.sym.BatchNorm(data_sym_bf16, **bn_params)
-check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28), bf16_use_fp32_params=True, etol=1e-3)
-check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, 
data_shape=(32, 16, 64, 64), bf16_use_fp32_params=True, etol=1e-3)
+check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28), bf16_use_fp32_params=True, etol=1e-2)
+check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, 
data_shape=(32, 16, 64, 64), bf16_use_fp32_params=True, etol=1e-2)
 
 @with_seed()
 def test_bf16_conv():
@@ -278,7 +278,7 @@ def test_bf16_fallback():
 bn_params = {"eps": 2e-05, "fix_gamma": False, "use_global_stats": True, 
"name": "bn"}
 bn_fp32 = mx.sym.BatchNorm(data_sym_fp32, **bn_params)
 bn_bf16=mx.sym.BatchNorm(data_sym_bf16, **bn_params)
-check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28, 3), bf16_use_fp32_params=True, etol=1e-3)
+check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 
32, 28, 28, 3), bf16_use_fp32_params=True, etol=1e-2)
 
 conv_params = {"kernel": (3, 3, 3), "num_filter": 128, "pad": (1, 1, 1), 
"stride": (1, 1, 1), "no_bias": True, "name": "conv"}
 conv_fp32 = mx.sym.Convolution(data_sym_fp32, **conv_params)



[incubator-mxnet] branch v1.7.x updated (814530d -> 4cf2ad3)

2020-04-18 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 814530d  Cherry-pick of #17995 and #17937 to 1.x branch (#18041)
 add 4cf2ad3  [v1.x] Backport #17689 and #17884 to v1.x branch (#18064)

No new revisions were added by this update.

Summary of changes:
 src/operator/nn/mkldnn/mkldnn_act.cc   |   8 +-
 src/operator/nn/mkldnn/mkldnn_base-inl.h   |  41 ++--
 src/operator/nn/mkldnn/mkldnn_base.cc  |  47 ++--
 src/operator/nn/mkldnn/mkldnn_convolution.cc   |  60 -
 src/operator/nn/mkldnn/mkldnn_pooling-inl.h|  59 +++--
 src/operator/nn/mkldnn/mkldnn_pooling.cc   | 254 -
 src/operator/nn/pooling.cc |   6 +-
 .../quantization/mkldnn/mkldnn_quantized_act.cc|   2 +-
 .../mkldnn/mkldnn_quantized_pooling.cc |   4 +-
 src/operator/quantization/quantized_conv.cc|  97 ++--
 src/operator/quantization/quantized_pooling.cc | 100 +---
 src/operator/subgraph/mkldnn/mkldnn_conv.cc|   9 +-
 .../subgraph/mkldnn/mkldnn_conv_property.h |   3 +-
 .../subgraph/mkldnn/mkldnn_subgraph_base-inl.h |   2 +-
 src/operator/tensor/matrix_op.cc   |   2 +-
 tests/cpp/operator/mkldnn_operator_test.cc |   4 +-
 tests/python/mkl/test_mkldnn.py|  11 +-
 tests/python/quantization/test_quantization.py |  54 +++--
 18 files changed, 493 insertions(+), 270 deletions(-)



[incubator-mxnet] branch v1.7.x updated (bf99f27 -> 814530d)

2020-04-18 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


 discard bf99f27  [1.7] MXNet Extension PRs (#17623, #17569, #17762) (#18063)
 add 91d595a  bump up 1.x branch to 1.7.0 (#17741)
 add 3b83cd8  Bump up additional scala 1.x branch to 1.7.0 (#17765)
 add 21fc103  [Website 2.0] Nightly Build for v1.x (#17956)
 add db93398  Pinning rvm version to satisfy Jekyll build (#18016)
 add 0d3aa67  Workaround gnu_tls handshake error on Ubuntu 14.04 Nvidia 
Docker (#18044)
 add 6fa374b  [v1.x] Backport #17702 and #17872 to v1.x branch (#18038)
 add 50d6d7d  [mkldnn]Mkldnn bn opt backport from master to 1.7x (#18009)
 add 2cf7219  [v1.x] Update 3rdparty/mkldnn remote URL and pin to v1.3 
(#17972) (#18033)
 add 3f920ae  Optimize AddTakeGrad Tensor Sum (#17906) (#18045)
 add 2ccbcec  GPU gemms true fp16 (#17466) (#18023)
 add 1afdfce  [1.7] Backport MXNet Extension PRs (#17623, #17569, #17762) 
#18063 (#18069)
 add b56571d  [v1.x] backport #17900 "[MKLDNN] support using any format in 
pooling backward" (#18067)
 add 8cfc64a  No tensor cores for fp32 interleaved attention, remove div by 
8 restriction (#17994) (#18085)
 add 2e22b5e  refactor codes and add an option to skip/check weight's 
version to reduce overhead (#17707) (#18039)
 add 3835139  Add gelu fuse ops (#18082) (#18092)
 add 814530d  Cherry-pick of #17995 and #17937 to 1.x branch (#18041)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (bf99f27)
\
 N -- N -- N   refs/heads/v1.7.x (814530d)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

No new revisions were added by this update.

Summary of changes:
 .gitmodules|   3 +-
 3rdparty/mkldnn|   2 +-
 3rdparty/mshadow/mshadow/tensor_cpu-inl.h  |   9 +-
 CMakeLists.txt |   1 +
 R-package/DESCRIPTION  |   2 +-
 ci/docker/Dockerfile.build.ubuntu_cpu_jekyll   |   2 +-
 ci/docker/install/centos7_core.sh  |  15 +-
 ci/docker/install/ubuntu_publish.sh|   4 +
 ...website_full_pr => Jenkinsfile_website_nightly} |  11 +-
 contrib/clojure-package/examples/bert/project.clj  |   2 +-
 .../clojure-package/examples/captcha/project.clj   |   2 +-
 .../examples/cnn-text-classification/project.clj   |   2 +-
 contrib/clojure-package/examples/gan/project.clj   |   4 +-
 .../examples/imclassification/project.clj  |   2 +-
 .../examples/infer/imageclassifier/project.clj |   2 +-
 .../examples/infer/objectdetector/project.clj  |   2 +-
 .../examples/infer/predictor/project.clj   |   2 +-
 .../clojure-package/examples/module/project.clj|   2 +-
 .../examples/multi-label/project.clj   |   2 +-
 .../examples/neural-style/project.clj  |   4 +-
 .../examples/pre-trained-models/project.clj|   2 +-
 .../clojure-package/examples/profiler/project.clj  |   2 +-
 contrib/clojure-package/examples/rnn/project.clj   |   2 +-
 .../clojure-package/examples/tutorial/project.clj  |   2 +-
 .../examples/visualization/project.clj |   2 +-
 contrib/clojure-package/project.clj|   2 +-
 docs/static_site/src/pages/api/faq/env_var.md  |   4 +
 .../docs/tutorials/mxnet_scala_on_intellij.md  |   4 +-
 include/mxnet/base.h   |   2 +-
 julia/NEWS.md  |   3 +
 mkldnn.mk  |   1 +
 python/mxnet/gluon/rnn/rnn_layer.py|   1 +
 python/mxnet/libinfo.py|   2 +-
 scala-package/README.md|  10 +-
 scala-package/mxnet-demo/java-demo/README.md   |   4 +-
 scala-package/mxnet-demo/java-demo/pom.xml |   6 +-
 scala-package/mxnet-demo/scala-demo/pom.xml|   2 +-
 scala-package/pom.xml  |   2 +-
 snapcraft.yaml |   2 +-
 src/executor/pointwise_fusion_pass.cc  |  14 ++
 src/operator/contrib/transformer.cu|  83 ++--
 src/operator/fusion/fused_op-inl.h |  31 ++-
 src/operator/fusion/fused_op.cu|  43 
 src/operator/linalg_imp

[incubator-mxnet] branch v1.7.x updated (bf99f27 -> 814530d)

2020-04-18 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


 discard bf99f27  [1.7] MXNet Extension PRs (#17623, #17569, #17762) (#18063)
 add 91d595a  bump up 1.x branch to 1.7.0 (#17741)
 add 3b83cd8  Bump up additional scala 1.x branch to 1.7.0 (#17765)
 add 21fc103  [Website 2.0] Nightly Build for v1.x (#17956)
 add db93398  Pinning rvm version to satisfy Jekyll build (#18016)
 add 0d3aa67  Workaround gnu_tls handshake error on Ubuntu 14.04 Nvidia 
Docker (#18044)
 add 6fa374b  [v1.x] Backport #17702 and #17872 to v1.x branch (#18038)
 add 50d6d7d  [mkldnn]Mkldnn bn opt backport from master to 1.7x (#18009)
 add 2cf7219  [v1.x] Update 3rdparty/mkldnn remote URL and pin to v1.3 
(#17972) (#18033)
 add 3f920ae  Optimize AddTakeGrad Tensor Sum (#17906) (#18045)
 add 2ccbcec  GPU gemms true fp16 (#17466) (#18023)
 add 1afdfce  [1.7] Backport MXNet Extension PRs (#17623, #17569, #17762) 
#18063 (#18069)
 add b56571d  [v1.x] backport #17900 "[MKLDNN] support using any format in 
pooling backward" (#18067)
 add 8cfc64a  No tensor cores for fp32 interleaved attention, remove div by 
8 restriction (#17994) (#18085)
 add 2e22b5e  refactor codes and add an option to skip/check weight's 
version to reduce overhead (#17707) (#18039)
 add 3835139  Add gelu fuse ops (#18082) (#18092)
 add 814530d  Cherry-pick of #17995 and #17937 to 1.x branch (#18041)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (bf99f27)
\
 N -- N -- N   refs/heads/v1.7.x (814530d)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

No new revisions were added by this update.

Summary of changes:
 .gitmodules|   3 +-
 3rdparty/mkldnn|   2 +-
 3rdparty/mshadow/mshadow/tensor_cpu-inl.h  |   9 +-
 CMakeLists.txt |   1 +
 R-package/DESCRIPTION  |   2 +-
 ci/docker/Dockerfile.build.ubuntu_cpu_jekyll   |   2 +-
 ci/docker/install/centos7_core.sh  |  15 +-
 ci/docker/install/ubuntu_publish.sh|   4 +
 ...website_full_pr => Jenkinsfile_website_nightly} |  11 +-
 contrib/clojure-package/examples/bert/project.clj  |   2 +-
 .../clojure-package/examples/captcha/project.clj   |   2 +-
 .../examples/cnn-text-classification/project.clj   |   2 +-
 contrib/clojure-package/examples/gan/project.clj   |   4 +-
 .../examples/imclassification/project.clj  |   2 +-
 .../examples/infer/imageclassifier/project.clj |   2 +-
 .../examples/infer/objectdetector/project.clj  |   2 +-
 .../examples/infer/predictor/project.clj   |   2 +-
 .../clojure-package/examples/module/project.clj|   2 +-
 .../examples/multi-label/project.clj   |   2 +-
 .../examples/neural-style/project.clj  |   4 +-
 .../examples/pre-trained-models/project.clj|   2 +-
 .../clojure-package/examples/profiler/project.clj  |   2 +-
 contrib/clojure-package/examples/rnn/project.clj   |   2 +-
 .../clojure-package/examples/tutorial/project.clj  |   2 +-
 .../examples/visualization/project.clj |   2 +-
 contrib/clojure-package/project.clj|   2 +-
 docs/static_site/src/pages/api/faq/env_var.md  |   4 +
 .../docs/tutorials/mxnet_scala_on_intellij.md  |   4 +-
 include/mxnet/base.h   |   2 +-
 julia/NEWS.md  |   3 +
 mkldnn.mk  |   1 +
 python/mxnet/gluon/rnn/rnn_layer.py|   1 +
 python/mxnet/libinfo.py|   2 +-
 scala-package/README.md|  10 +-
 scala-package/mxnet-demo/java-demo/README.md   |   4 +-
 scala-package/mxnet-demo/java-demo/pom.xml |   6 +-
 scala-package/mxnet-demo/scala-demo/pom.xml|   2 +-
 scala-package/pom.xml  |   2 +-
 snapcraft.yaml |   2 +-
 src/executor/pointwise_fusion_pass.cc  |  14 ++
 src/operator/contrib/transformer.cu|  83 ++--
 src/operator/fusion/fused_op-inl.h |  31 ++-
 src/operator/fusion/fused_op.cu|  43 
 src/operator/linalg_imp

[incubator-mxnet] branch v1.7.x updated (bf99f27 -> 814530d)

2020-04-18 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


 discard bf99f27  [1.7] MXNet Extension PRs (#17623, #17569, #17762) (#18063)
 add 91d595a  bump up 1.x branch to 1.7.0 (#17741)
 add 3b83cd8  Bump up additional scala 1.x branch to 1.7.0 (#17765)
 add 21fc103  [Website 2.0] Nightly Build for v1.x (#17956)
 add db93398  Pinning rvm version to satisfy Jekyll build (#18016)
 add 0d3aa67  Workaround gnu_tls handshake error on Ubuntu 14.04 Nvidia 
Docker (#18044)
 add 6fa374b  [v1.x] Backport #17702 and #17872 to v1.x branch (#18038)
 add 50d6d7d  [mkldnn]Mkldnn bn opt backport from master to 1.7x (#18009)
 add 2cf7219  [v1.x] Update 3rdparty/mkldnn remote URL and pin to v1.3 
(#17972) (#18033)
 add 3f920ae  Optimize AddTakeGrad Tensor Sum (#17906) (#18045)
 add 2ccbcec  GPU gemms true fp16 (#17466) (#18023)
 add 1afdfce  [1.7] Backport MXNet Extension PRs (#17623, #17569, #17762) 
#18063 (#18069)
 add b56571d  [v1.x] backport #17900 "[MKLDNN] support using any format in 
pooling backward" (#18067)
 add 8cfc64a  No tensor cores for fp32 interleaved attention, remove div by 
8 restriction (#17994) (#18085)
 add 2e22b5e  refactor codes and add an option to skip/check weight's 
version to reduce overhead (#17707) (#18039)
 add 3835139  Add gelu fuse ops (#18082) (#18092)
 add 814530d  Cherry-pick of #17995 and #17937 to 1.x branch (#18041)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (bf99f27)
\
 N -- N -- N   refs/heads/v1.7.x (814530d)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

No new revisions were added by this update.

Summary of changes:
 .gitmodules|   3 +-
 3rdparty/mkldnn|   2 +-
 3rdparty/mshadow/mshadow/tensor_cpu-inl.h  |   9 +-
 CMakeLists.txt |   1 +
 R-package/DESCRIPTION  |   2 +-
 ci/docker/Dockerfile.build.ubuntu_cpu_jekyll   |   2 +-
 ci/docker/install/centos7_core.sh  |  15 +-
 ci/docker/install/ubuntu_publish.sh|   4 +
 ...website_full_pr => Jenkinsfile_website_nightly} |  11 +-
 contrib/clojure-package/examples/bert/project.clj  |   2 +-
 .../clojure-package/examples/captcha/project.clj   |   2 +-
 .../examples/cnn-text-classification/project.clj   |   2 +-
 contrib/clojure-package/examples/gan/project.clj   |   4 +-
 .../examples/imclassification/project.clj  |   2 +-
 .../examples/infer/imageclassifier/project.clj |   2 +-
 .../examples/infer/objectdetector/project.clj  |   2 +-
 .../examples/infer/predictor/project.clj   |   2 +-
 .../clojure-package/examples/module/project.clj|   2 +-
 .../examples/multi-label/project.clj   |   2 +-
 .../examples/neural-style/project.clj  |   4 +-
 .../examples/pre-trained-models/project.clj|   2 +-
 .../clojure-package/examples/profiler/project.clj  |   2 +-
 contrib/clojure-package/examples/rnn/project.clj   |   2 +-
 .../clojure-package/examples/tutorial/project.clj  |   2 +-
 .../examples/visualization/project.clj |   2 +-
 contrib/clojure-package/project.clj|   2 +-
 docs/static_site/src/pages/api/faq/env_var.md  |   4 +
 .../docs/tutorials/mxnet_scala_on_intellij.md  |   4 +-
 include/mxnet/base.h   |   2 +-
 julia/NEWS.md  |   3 +
 mkldnn.mk  |   1 +
 python/mxnet/gluon/rnn/rnn_layer.py|   1 +
 python/mxnet/libinfo.py|   2 +-
 scala-package/README.md|  10 +-
 scala-package/mxnet-demo/java-demo/README.md   |   4 +-
 scala-package/mxnet-demo/java-demo/pom.xml |   6 +-
 scala-package/mxnet-demo/scala-demo/pom.xml|   2 +-
 scala-package/pom.xml  |   2 +-
 snapcraft.yaml |   2 +-
 src/executor/pointwise_fusion_pass.cc  |  14 ++
 src/operator/contrib/transformer.cu|  83 ++--
 src/operator/fusion/fused_op-inl.h |  31 ++-
 src/operator/fusion/fused_op.cu|  43 
 src/operator/linalg_imp

[incubator-mxnet] branch v1.7.x updated (bf99f27 -> 814530d)

2020-04-18 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


 discard bf99f27  [1.7] MXNet Extension PRs (#17623, #17569, #17762) (#18063)
 add 91d595a  bump up 1.x branch to 1.7.0 (#17741)
 add 3b83cd8  Bump up additional scala 1.x branch to 1.7.0 (#17765)
 add 21fc103  [Website 2.0] Nightly Build for v1.x (#17956)
 add db93398  Pinning rvm version to satisfy Jekyll build (#18016)
 add 0d3aa67  Workaround gnu_tls handshake error on Ubuntu 14.04 Nvidia 
Docker (#18044)
 add 6fa374b  [v1.x] Backport #17702 and #17872 to v1.x branch (#18038)
 add 50d6d7d  [mkldnn]Mkldnn bn opt backport from master to 1.7x (#18009)
 add 2cf7219  [v1.x] Update 3rdparty/mkldnn remote URL and pin to v1.3 
(#17972) (#18033)
 add 3f920ae  Optimize AddTakeGrad Tensor Sum (#17906) (#18045)
 add 2ccbcec  GPU gemms true fp16 (#17466) (#18023)
 add 1afdfce  [1.7] Backport MXNet Extension PRs (#17623, #17569, #17762) 
#18063 (#18069)
 add b56571d  [v1.x] backport #17900 "[MKLDNN] support using any format in 
pooling backward" (#18067)
 add 8cfc64a  No tensor cores for fp32 interleaved attention, remove div by 
8 restriction (#17994) (#18085)
 add 2e22b5e  refactor codes and add an option to skip/check weight's 
version to reduce overhead (#17707) (#18039)
 add 3835139  Add gelu fuse ops (#18082) (#18092)
 add 814530d  Cherry-pick of #17995 and #17937 to 1.x branch (#18041)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (bf99f27)
\
 N -- N -- N   refs/heads/v1.7.x (814530d)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

No new revisions were added by this update.

Summary of changes:
 .gitmodules|   3 +-
 3rdparty/mkldnn|   2 +-
 3rdparty/mshadow/mshadow/tensor_cpu-inl.h  |   9 +-
 CMakeLists.txt |   1 +
 R-package/DESCRIPTION  |   2 +-
 ci/docker/Dockerfile.build.ubuntu_cpu_jekyll   |   2 +-
 ci/docker/install/centos7_core.sh  |  15 +-
 ci/docker/install/ubuntu_publish.sh|   4 +
 ...website_full_pr => Jenkinsfile_website_nightly} |  11 +-
 contrib/clojure-package/examples/bert/project.clj  |   2 +-
 .../clojure-package/examples/captcha/project.clj   |   2 +-
 .../examples/cnn-text-classification/project.clj   |   2 +-
 contrib/clojure-package/examples/gan/project.clj   |   4 +-
 .../examples/imclassification/project.clj  |   2 +-
 .../examples/infer/imageclassifier/project.clj |   2 +-
 .../examples/infer/objectdetector/project.clj  |   2 +-
 .../examples/infer/predictor/project.clj   |   2 +-
 .../clojure-package/examples/module/project.clj|   2 +-
 .../examples/multi-label/project.clj   |   2 +-
 .../examples/neural-style/project.clj  |   4 +-
 .../examples/pre-trained-models/project.clj|   2 +-
 .../clojure-package/examples/profiler/project.clj  |   2 +-
 contrib/clojure-package/examples/rnn/project.clj   |   2 +-
 .../clojure-package/examples/tutorial/project.clj  |   2 +-
 .../examples/visualization/project.clj |   2 +-
 contrib/clojure-package/project.clj|   2 +-
 docs/static_site/src/pages/api/faq/env_var.md  |   4 +
 .../docs/tutorials/mxnet_scala_on_intellij.md  |   4 +-
 include/mxnet/base.h   |   2 +-
 julia/NEWS.md  |   3 +
 mkldnn.mk  |   1 +
 python/mxnet/gluon/rnn/rnn_layer.py|   1 +
 python/mxnet/libinfo.py|   2 +-
 scala-package/README.md|  10 +-
 scala-package/mxnet-demo/java-demo/README.md   |   4 +-
 scala-package/mxnet-demo/java-demo/pom.xml |   6 +-
 scala-package/mxnet-demo/scala-demo/pom.xml|   2 +-
 scala-package/pom.xml  |   2 +-
 snapcraft.yaml |   2 +-
 src/executor/pointwise_fusion_pass.cc  |  14 ++
 src/operator/contrib/transformer.cu|  83 ++--
 src/operator/fusion/fused_op-inl.h |  31 ++-
 src/operator/fusion/fused_op.cu|  43 
 src/operator/linalg_imp

[incubator-mxnet] branch v1.7.x updated (bf99f27 -> 814530d)

2020-04-18 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


 discard bf99f27  [1.7] MXNet Extension PRs (#17623, #17569, #17762) (#18063)
 add 91d595a  bump up 1.x branch to 1.7.0 (#17741)
 add 3b83cd8  Bump up additional scala 1.x branch to 1.7.0 (#17765)
 add 21fc103  [Website 2.0] Nightly Build for v1.x (#17956)
 add db93398  Pinning rvm version to satisfy Jekyll build (#18016)
 add 0d3aa67  Workaround gnu_tls handshake error on Ubuntu 14.04 Nvidia 
Docker (#18044)
 add 6fa374b  [v1.x] Backport #17702 and #17872 to v1.x branch (#18038)
 add 50d6d7d  [mkldnn]Mkldnn bn opt backport from master to 1.7x (#18009)
 add 2cf7219  [v1.x] Update 3rdparty/mkldnn remote URL and pin to v1.3 
(#17972) (#18033)
 add 3f920ae  Optimize AddTakeGrad Tensor Sum (#17906) (#18045)
 add 2ccbcec  GPU gemms true fp16 (#17466) (#18023)
 add 1afdfce  [1.7] Backport MXNet Extension PRs (#17623, #17569, #17762) 
#18063 (#18069)
 add b56571d  [v1.x] backport #17900 "[MKLDNN] support using any format in 
pooling backward" (#18067)
 add 8cfc64a  No tensor cores for fp32 interleaved attention, remove div by 
8 restriction (#17994) (#18085)
 add 2e22b5e  refactor codes and add an option to skip/check weight's 
version to reduce overhead (#17707) (#18039)
 add 3835139  Add gelu fuse ops (#18082) (#18092)
 add 814530d  Cherry-pick of #17995 and #17937 to 1.x branch (#18041)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (bf99f27)
\
 N -- N -- N   refs/heads/v1.7.x (814530d)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

No new revisions were added by this update.

Summary of changes:
 .gitmodules|   3 +-
 3rdparty/mkldnn|   2 +-
 3rdparty/mshadow/mshadow/tensor_cpu-inl.h  |   9 +-
 CMakeLists.txt |   1 +
 R-package/DESCRIPTION  |   2 +-
 ci/docker/Dockerfile.build.ubuntu_cpu_jekyll   |   2 +-
 ci/docker/install/centos7_core.sh  |  15 +-
 ci/docker/install/ubuntu_publish.sh|   4 +
 ...website_full_pr => Jenkinsfile_website_nightly} |  11 +-
 contrib/clojure-package/examples/bert/project.clj  |   2 +-
 .../clojure-package/examples/captcha/project.clj   |   2 +-
 .../examples/cnn-text-classification/project.clj   |   2 +-
 contrib/clojure-package/examples/gan/project.clj   |   4 +-
 .../examples/imclassification/project.clj  |   2 +-
 .../examples/infer/imageclassifier/project.clj |   2 +-
 .../examples/infer/objectdetector/project.clj  |   2 +-
 .../examples/infer/predictor/project.clj   |   2 +-
 .../clojure-package/examples/module/project.clj|   2 +-
 .../examples/multi-label/project.clj   |   2 +-
 .../examples/neural-style/project.clj  |   4 +-
 .../examples/pre-trained-models/project.clj|   2 +-
 .../clojure-package/examples/profiler/project.clj  |   2 +-
 contrib/clojure-package/examples/rnn/project.clj   |   2 +-
 .../clojure-package/examples/tutorial/project.clj  |   2 +-
 .../examples/visualization/project.clj |   2 +-
 contrib/clojure-package/project.clj|   2 +-
 docs/static_site/src/pages/api/faq/env_var.md  |   4 +
 .../docs/tutorials/mxnet_scala_on_intellij.md  |   4 +-
 include/mxnet/base.h   |   2 +-
 julia/NEWS.md  |   3 +
 mkldnn.mk  |   1 +
 python/mxnet/gluon/rnn/rnn_layer.py|   1 +
 python/mxnet/libinfo.py|   2 +-
 scala-package/README.md|  10 +-
 scala-package/mxnet-demo/java-demo/README.md   |   4 +-
 scala-package/mxnet-demo/java-demo/pom.xml |   6 +-
 scala-package/mxnet-demo/scala-demo/pom.xml|   2 +-
 scala-package/pom.xml  |   2 +-
 snapcraft.yaml |   2 +-
 src/executor/pointwise_fusion_pass.cc  |  14 ++
 src/operator/contrib/transformer.cu|  83 ++--
 src/operator/fusion/fused_op-inl.h |  31 ++-
 src/operator/fusion/fused_op.cu|  43 
 src/operator/linalg_imp

[incubator-mxnet] branch master updated: [MKLDNN] apply MKLDNNRun to quantized_act/transpose (#17689)

2020-02-29 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
 new 88b3051  [MKLDNN] apply MKLDNNRun to quantized_act/transpose (#17689)
88b3051 is described below

commit 88b3051f290c994daed5cac7c9724319b7b6aba0
Author: Wuxun Zhang 
AuthorDate: Sat Feb 29 21:17:38 2020 +0800

[MKLDNN] apply MKLDNNRun to quantized_act/transpose (#17689)

* apply MKLDNNRun to quantized_act/transpose ops

* run CI
---
 src/operator/quantization/mkldnn/mkldnn_quantized_act.cc | 2 +-
 src/operator/tensor/matrix_op.cc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/operator/quantization/mkldnn/mkldnn_quantized_act.cc 
b/src/operator/quantization/mkldnn/mkldnn_quantized_act.cc
index bc69cb5..86acac8 100644
--- a/src/operator/quantization/mkldnn/mkldnn_quantized_act.cc
+++ b/src/operator/quantization/mkldnn/mkldnn_quantized_act.cc
@@ -40,7 +40,7 @@ static void MKLDNNQuantizedActForward(const nnvm::NodeAttrs& 
attrs,
   << "_contrib_quantized_act op only supports uint8 and int8 as input "
  "type";
 
-  MKLDNNActivationForward(attrs, ctx, in_data[0], req[0], out_data[0]);
+  MKLDNNRun(MKLDNNActivationForward, attrs, ctx, in_data[0], req[0], 
out_data[0]);
   out_data[1].data().dptr()[0] = in_data[1].data().dptr()[0];
   out_data[2].data().dptr()[0] = in_data[2].data().dptr()[0];
 }
diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index f00caf3..9e63730 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -289,7 +289,7 @@ static void TransposeComputeExCPU(const nnvm::NodeAttrs& 
attrs,
   CHECK_EQ(outputs.size(), 1U);
 
   if (SupportMKLDNNTranspose(param, inputs[0]) && req[0] == kWriteTo) {
-MKLDNNTransposeForward(attrs, ctx, inputs[0], req[0], outputs[0]);
+MKLDNNRun(MKLDNNTransposeForward, attrs, ctx, inputs[0], req[0], 
outputs[0]);
 return;
   }
   FallBackCompute(Transpose, attrs, ctx, inputs, req, outputs);



[incubator-mxnet] branch master updated (2219f1a -> 9883b99)

2020-02-08 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 2219f1a  cmake: staticbuild for binary distribution (#17448)
 add 9883b99  add mkldnn softmax backward  (#17170)

No new revisions were added by this update.

Summary of changes:
 src/operator/nn/mkldnn/mkldnn_ops-inl.h  |  4 ++
 src/operator/nn/mkldnn/mkldnn_softmax.cc | 84 
 src/operator/nn/softmax.cc   | 41 +++-
 3 files changed, 127 insertions(+), 2 deletions(-)



[incubator-mxnet] branch master updated (0392514 -> cc4632d)

2020-02-06 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 0392514  [OpPerf] Implement remaining random sampling ops (#17502)
 add cc4632d  fix custom op makefile (#17516)

No new revisions were added by this update.

Summary of changes:
 Makefile  | 19 +++
 example/extensions/lib_custom_op/test_relu.py |  1 +
 2 files changed, 12 insertions(+), 8 deletions(-)



[incubator-mxnet] branch v1.6.x updated: [v1.6.x] Cherry-pick MKL-DNN Rnn operator enhancements to v1.6.x (#17225)

2020-01-06 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.6.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.6.x by this push:
 new ad1ff3a  [v1.6.x] Cherry-pick MKL-DNN Rnn operator enhancements to 
v1.6.x (#17225)
ad1ff3a is described below

commit ad1ff3aa8532f2f7e42a732f0d35dfb0574ca05c
Author: Zixuan Wei 
AuthorDate: Tue Jan 7 09:50:53 2020 +0800

[v1.6.x] Cherry-pick MKL-DNN Rnn operator enhancements to v1.6.x (#17225)

* [MKLDNN] mkldnn RNN operator enhancement (#17075)

* mkldnn rnn operator enhancement

`add` operation support

Rename AddTo

Add MXNET_USE_MKLDNN_RNN env

Add Env var for switching to naive RNN impl and naive add/copy impl

* Re-run CI, op:test_reduce failed on Unix-CPU

* Rerun CI, Python2 CPU on Unix-CPU timeout

* MKL-DNN RNN backward path enhancement (#17183)

* Flush memory before RNN backward primitive

* Add gluon rnn unit test for gradients check

* Cache reorder

* Re-write rnn supporting check

* Update OpSignature.AddSign to avoid potential hash collision for
rnn-packed memory

Get the data type from mkldnn memory descriptor when setting grad handle
---
 docs/static_site/src/pages/api/faq/env_var.md |  12 +-
 src/common/utils.h|  20 +-
 src/operator/nn/mkldnn/mkldnn_base-inl.h  |   9 +-
 src/operator/nn/mkldnn/mkldnn_rnn-inl.h   |  38 ++-
 src/operator/nn/mkldnn/mkldnn_rnn.cc  | 430 +++---
 src/operator/operator_common.h|  18 ++
 src/operator/rnn.cc   |  13 +-
 tests/python/unittest/test_gluon_rnn.py   | 124 
 tests/python/unittest/test_operator.py|  21 +-
 9 files changed, 466 insertions(+), 219 deletions(-)

diff --git a/docs/static_site/src/pages/api/faq/env_var.md 
b/docs/static_site/src/pages/api/faq/env_var.md
index e4fe58a..d63da61 100644
--- a/docs/static_site/src/pages/api/faq/env_var.md
+++ b/docs/static_site/src/pages/api/faq/env_var.md
@@ -283,11 +283,11 @@ If ctypes is used, it must be 
`mxnet._ctypes.ndarray.NDArrayBase`.
   If no such algorithm exists given other constraints, MXNet will error out. 
This variable affects the choice
   of CUDNN convolution algorithms. Please see [CUDNN developer 
guide](https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html)
 for more details.
 
-* MXNET_CPU_PARALLEL_COPY_SIZE
+* MXNET_CPU_PARALLEL_SIZE
   - Values: Int ```(default=20)```
-  - The minimum size to call parallel copy by OpenMP in CPU2CPU mode.
-  - When the array size is bigger than or equal to  this threshold, 
NDArray::Copy(from, to) is implemented by OpenMP with the Recommended OMP 
Thread Count.
-  - When the array size is less than this threshold, NDArray::Copy(from , to)) 
is implemented by memcpy in single thread.
+  - The minimum size to call parallel operations by OpenMP for CPU context.
+  - When the array size is bigger than or equal to this threshold, the 
operation implemented by OpenMP is executed with the Recommended OMP Thread 
Count.
+  - When the array size is less than this threshold, the operation is 
implemented naively in single thread.
 
 * MXNET_OPTIMIZER_AGGREGATION_SIZE
   - Values: Int ```(default=4)```
@@ -343,6 +343,10 @@ If ctypes is used, it must be 
`mxnet._ctypes.ndarray.NDArrayBase`.
   - Values: 0(false) or 1(true) ```(default=1)```
   - If this variable is set, MXNet will simplify the computation graph, 
eliminating duplicated operations on the same inputs.
 
+* MXNET_USE_MKLDNN_RNN
+  - Values: 0(false) or 1(true) ```(default=1)```
+  - This variable controls whether to use the MKL-DNN backend in fused RNN 
operator for CPU context. There are two fusion implementations of RNN operator 
in MXNet. The MKL-DNN implementation has a better performance than the naive 
one, but the latter is more stable in the backward operation currently.
+
 Settings for Minimum Memory Usage
 -
 - Make sure ```min(MXNET_EXEC_NUM_TEMP, MXNET_GPU_WORKER_NTHREADS) = 1```
diff --git a/src/common/utils.h b/src/common/utils.h
index 0e3e354..fcb61b7 100644
--- a/src/common/utils.h
+++ b/src/common/utils.h
@@ -760,7 +760,7 @@ inline void EmplaceBackZeros(const NDArrayStorageType 
stype, const mxnet::TShape
  */
 template
 inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
-  static index_t copy_block_size = 
dmlc::GetEnv("MXNET_CPU_PARALLEL_COPY_SIZE", 20);
+  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 
20);
   if (size >= copy_block_size) {
 #pragma omp parallel for 
num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
 for (index_t i = 0; i < size; ++i) {
@@ -772,6 +772,24 @@ inline void ParallelCopy(DType* dst, co

[incubator-mxnet] branch master updated (2a9ec0e -> 89fe1f6)

2020-01-02 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 2a9ec0e  Softmax primitive cache and in-place computation (#17152)
 add 89fe1f6  [MKLDNN] Support channel wise quantization for FullyConnected 
(#17187)

No new revisions were added by this update.

Summary of changes:
 include/mxnet/c_api.h  |   5 +-
 include/mxnet/op_attr_types.h  |   4 +-
 python/mxnet/contrib/quantization.py   |  52 ++-
 src/c_api/c_api_symbolic.cc|   3 +
 .../nn/mkldnn/mkldnn_fully_connected-inl.h |   7 +-
 src/operator/nn/mkldnn/mkldnn_fully_connected.cc   |  25 +-
 src/operator/quantization/quantize_graph_pass.cc   |  22 +-
 src/operator/quantization/quantized_batch_norm.cc  |   8 +-
 src/operator/quantization/quantized_indexing_op.cc |   8 +-
 src/operator/subgraph/mkldnn/mkldnn_common.h   | 138 
 src/operator/subgraph/mkldnn/mkldnn_conv.cc| 110 +--
 src/operator/subgraph/mkldnn/mkldnn_fc.cc  | 358 +++--
 tests/python/mkl/test_subgraph.py  |  69 ++--
 13 files changed, 536 insertions(+), 273 deletions(-)
 create mode 100644 src/operator/subgraph/mkldnn/mkldnn_common.h



[incubator-mxnet] branch master updated (2a9ec0e -> 89fe1f6)

2020-01-02 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 2a9ec0e  Softmax primitive cache and in-place computation (#17152)
 add 89fe1f6  [MKLDNN] Support channel wise quantization for FullyConnected 
(#17187)

No new revisions were added by this update.

Summary of changes:
 include/mxnet/c_api.h  |   5 +-
 include/mxnet/op_attr_types.h  |   4 +-
 python/mxnet/contrib/quantization.py   |  52 ++-
 src/c_api/c_api_symbolic.cc|   3 +
 .../nn/mkldnn/mkldnn_fully_connected-inl.h |   7 +-
 src/operator/nn/mkldnn/mkldnn_fully_connected.cc   |  25 +-
 src/operator/quantization/quantize_graph_pass.cc   |  22 +-
 src/operator/quantization/quantized_batch_norm.cc  |   8 +-
 src/operator/quantization/quantized_indexing_op.cc |   8 +-
 src/operator/subgraph/mkldnn/mkldnn_common.h   | 138 
 src/operator/subgraph/mkldnn/mkldnn_conv.cc| 110 +--
 src/operator/subgraph/mkldnn/mkldnn_fc.cc  | 358 +++--
 tests/python/mkl/test_subgraph.py  |  69 ++--
 13 files changed, 536 insertions(+), 273 deletions(-)
 create mode 100644 src/operator/subgraph/mkldnn/mkldnn_common.h



[incubator-mxnet] branch master updated (6c20fb9 -> 971ea4f)

2019-11-23 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 6c20fb9  [Numpy] Implementation npx.{sample}_n (#16876)
 add 971ea4f  Add arange_like to npx (#16883)

No new revisions were added by this update.

Summary of changes:
 src/operator/tensor/init_op.cc | 1 +
 1 file changed, 1 insertion(+)



[incubator-mxnet] branch master updated: change _generate_op_module_signature get_module_file open with encoding="utf-8"; it fixes some encoding errors on Chinese Windows systems. (#16738)

2019-11-09 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
 new 57b9b2c  change _generate_op_module_signature get_module_file open 
with encoding="utf-8",it fix some encode error in Chinese windows system. 
(#16738)
57b9b2c is described below

commit 57b9b2c769ce53506ad45051f5fe3b542d9bc2da
Author: Hu Shiwen 
AuthorDate: Sun Nov 10 09:03:03 2019 +0800

change _generate_op_module_signature get_module_file open with 
encoding="utf-8"; it fixes some encoding errors on Chinese Windows systems. (#16738)
---
 python/mxnet/base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/mxnet/base.py b/python/mxnet/base.py
index 35acba3..1c46e16 100644
--- a/python/mxnet/base.py
+++ b/python/mxnet/base.py
@@ -672,7 +672,7 @@ def _generate_op_module_signature(root_namespace, 
module_name, op_code_gen_func)
 module_path = module_name.split('.')
 module_path[-1] = 'gen_' + module_path[-1]
 file_name = os.path.join(path, '..', *module_path) + '.py'
-module_file = open(file_name, 'w')
+module_file = open(file_name, 'w', encoding="utf-8")
 dependencies = {'symbol': ['from ._internal import SymbolBase',
'from ..base import _Null'],
 'ndarray': ['from ._internal import NDArrayBase',



[incubator-mxnet] branch mkldnn-v1.0 updated (5ed6d93 -> b38d6a9)

2019-10-31 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 5ed6d93  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0
 add b38d6a9  add default parameter for mkldnn rnn

No new revisions were added by this update.

Summary of changes:
 src/operator/nn/mkldnn/mkldnn_rnn.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)



[incubator-mxnet] branch mkldnn-v1.0 updated (109be60 -> 5ed6d93)

2019-10-30 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 109be60  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0
 add 77e8f51  fix cuDNN RNN dtype_with_fallback_ bug (#16671)
 add 5ed6d93  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 src/operator/rnn-inl.h | 17 -
 1 file changed, 4 insertions(+), 13 deletions(-)



[incubator-mxnet] branch mkldnn-v1.0 updated (b5eb25d -> 109be60)

2019-10-30 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from b5eb25d  disable MKLDNN FC backward
 add 86ed5f5  [NumPy][Operator] NumPy operator `may_share_memory` and 
`shares_memory` (#16533)
 add 60d74bc  Showing proper error message when an attempt is made to 
create large tensor but MXNet is not built with it (#16570)
 add 5aa74e0  Move ops which don't support FP16 dtype to FP32 list (#16668)
 add 8e50fd9  no such method => modified function args (#16610)
 add 27bddf8  [Numpy] Numpy operator diff (#15906)
 add 109be60  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 include/mxnet/c_api.h  |  12 +-
 julia/deps/build.jl|   2 +-
 julia/src/metric.jl|   2 +-
 python/mxnet/contrib/amp/lists/symbol.py   |   6 +-
 python/mxnet/ndarray/ndarray.py|  10 +
 python/mxnet/ndarray/numpy/_op.py  | 124 +++-
 python/mxnet/numpy/multiarray.py   | 128 +++-
 python/mxnet/numpy_dispatch_protocol.py|   5 +-
 python/mxnet/symbol/numpy/_symbol.py   |  90 -
 python/mxnet/symbol/symbol.py  |   7 +-
 src/c_api/c_api.cc |  13 +-
 src/c_api/c_api_ndarray.cc |   8 +-
 src/ndarray/ndarray.cc |  15 ++
 src/ndarray/ndarray_function.cc|   5 +
 src/operator/numpy/np_diff-inl.h   | 220 +
 src/operator/numpy/np_diff.cc  | 109 ++
 .../numpy/{random/np_uniform_op.cu => np_diff.cu}  |  14 +-
 src/operator/numpy/np_memory_op.cc |  62 ++
 .../quantize.cu => numpy/np_memory_op.cu}  |  12 +-
 src/operator/numpy/np_memory_op.h  |  75 +++
 src/operator/tensor/init_op.h  |  30 ++-
 .../python/unittest/test_numpy_interoperability.py |  39 
 tests/python/unittest/test_numpy_op.py |  79 +++-
 tests/python/unittest/test_operator.py |  56 ++
 24 files changed, 1087 insertions(+), 36 deletions(-)
 create mode 100644 src/operator/numpy/np_diff-inl.h
 create mode 100644 src/operator/numpy/np_diff.cc
 copy src/operator/numpy/{random/np_uniform_op.cu => np_diff.cu} (75%)
 create mode 100644 src/operator/numpy/np_memory_op.cc
 copy src/operator/{quantization/quantize.cu => numpy/np_memory_op.cu} (82%)
 create mode 100644 src/operator/numpy/np_memory_op.h



[incubator-mxnet] branch mkldnn-v1.0 updated (b5eb25d -> 109be60)

2019-10-30 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from b5eb25d  disable MKLDNN FC backward
 add 86ed5f5  [NumPy][Operator] NumPy operator `may_share_memory` and 
`shares_memory` (#16533)
 add 60d74bc  Showing proper error message when an attempt is made to 
create large tensor but MXNet is not built with it (#16570)
 add 5aa74e0  Move ops which don't support FP16 dtype to FP32 list (#16668)
 add 8e50fd9  no such method => modified function args (#16610)
 add 27bddf8  [Numpy] Numpy operator diff (#15906)
 add 109be60  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 include/mxnet/c_api.h  |  12 +-
 julia/deps/build.jl|   2 +-
 julia/src/metric.jl|   2 +-
 python/mxnet/contrib/amp/lists/symbol.py   |   6 +-
 python/mxnet/ndarray/ndarray.py|  10 +
 python/mxnet/ndarray/numpy/_op.py  | 124 +++-
 python/mxnet/numpy/multiarray.py   | 128 +++-
 python/mxnet/numpy_dispatch_protocol.py|   5 +-
 python/mxnet/symbol/numpy/_symbol.py   |  90 -
 python/mxnet/symbol/symbol.py  |   7 +-
 src/c_api/c_api.cc |  13 +-
 src/c_api/c_api_ndarray.cc |   8 +-
 src/ndarray/ndarray.cc |  15 ++
 src/ndarray/ndarray_function.cc|   5 +
 src/operator/numpy/np_diff-inl.h   | 220 +
 src/operator/numpy/np_diff.cc  | 109 ++
 .../numpy/{random/np_uniform_op.cu => np_diff.cu}  |  14 +-
 src/operator/numpy/np_memory_op.cc |  62 ++
 .../quantize.cu => numpy/np_memory_op.cu}  |  12 +-
 src/operator/numpy/np_memory_op.h  |  75 +++
 src/operator/tensor/init_op.h  |  30 ++-
 .../python/unittest/test_numpy_interoperability.py |  39 
 tests/python/unittest/test_numpy_op.py |  79 +++-
 tests/python/unittest/test_operator.py |  56 ++
 24 files changed, 1087 insertions(+), 36 deletions(-)
 create mode 100644 src/operator/numpy/np_diff-inl.h
 create mode 100644 src/operator/numpy/np_diff.cc
 copy src/operator/numpy/{random/np_uniform_op.cu => np_diff.cu} (75%)
 create mode 100644 src/operator/numpy/np_memory_op.cc
 copy src/operator/{quantization/quantize.cu => numpy/np_memory_op.cu} (82%)
 create mode 100644 src/operator/numpy/np_memory_op.h



[incubator-mxnet] branch mkldnn-v1.0 updated (b5eb25d -> 109be60)

2019-10-30 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from b5eb25d  disable MKLDNN FC backward
 add 86ed5f5  [NumPy][Operator] NumPy operator `may_share_memory` and 
`shares_memory` (#16533)
 add 60d74bc  Showing proper error message when an attempt is made to 
create large tensor but MXNet is not built with it (#16570)
 add 5aa74e0  Move ops which don't support FP16 dtype to FP32 list (#16668)
 add 8e50fd9  no such method => modified function args (#16610)
 add 27bddf8  [Numpy] Numpy operator diff (#15906)
 add 109be60  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 include/mxnet/c_api.h  |  12 +-
 julia/deps/build.jl|   2 +-
 julia/src/metric.jl|   2 +-
 python/mxnet/contrib/amp/lists/symbol.py   |   6 +-
 python/mxnet/ndarray/ndarray.py|  10 +
 python/mxnet/ndarray/numpy/_op.py  | 124 +++-
 python/mxnet/numpy/multiarray.py   | 128 +++-
 python/mxnet/numpy_dispatch_protocol.py|   5 +-
 python/mxnet/symbol/numpy/_symbol.py   |  90 -
 python/mxnet/symbol/symbol.py  |   7 +-
 src/c_api/c_api.cc |  13 +-
 src/c_api/c_api_ndarray.cc |   8 +-
 src/ndarray/ndarray.cc |  15 ++
 src/ndarray/ndarray_function.cc|   5 +
 src/operator/numpy/np_diff-inl.h   | 220 +
 src/operator/numpy/np_diff.cc  | 109 ++
 .../numpy/{random/np_uniform_op.cu => np_diff.cu}  |  14 +-
 src/operator/numpy/np_memory_op.cc |  62 ++
 .../quantize.cu => numpy/np_memory_op.cu}  |  12 +-
 src/operator/numpy/np_memory_op.h  |  75 +++
 src/operator/tensor/init_op.h  |  30 ++-
 .../python/unittest/test_numpy_interoperability.py |  39 
 tests/python/unittest/test_numpy_op.py |  79 +++-
 tests/python/unittest/test_operator.py |  56 ++
 24 files changed, 1087 insertions(+), 36 deletions(-)
 create mode 100644 src/operator/numpy/np_diff-inl.h
 create mode 100644 src/operator/numpy/np_diff.cc
 copy src/operator/numpy/{random/np_uniform_op.cu => np_diff.cu} (75%)
 create mode 100644 src/operator/numpy/np_memory_op.cc
 copy src/operator/{quantization/quantize.cu => numpy/np_memory_op.cu} (82%)
 create mode 100644 src/operator/numpy/np_memory_op.h



[incubator-mxnet] branch mkldnn-v1.0 updated (b5eb25d -> 109be60)

2019-10-30 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from b5eb25d  disable MKLDNN FC backward
 add 86ed5f5  [NumPy][Operator] NumPy operator `may_share_memory` and 
`shares_memory` (#16533)
 add 60d74bc  Showing proper error message when an attempt is made to 
create large tensor but MXNet is not built with it (#16570)
 add 5aa74e0  Move ops which don't support FP16 dtype to FP32 list (#16668)
 add 8e50fd9  no such method => modified function args (#16610)
 add 27bddf8  [Numpy] Numpy operator diff (#15906)
 add 109be60  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 include/mxnet/c_api.h  |  12 +-
 julia/deps/build.jl|   2 +-
 julia/src/metric.jl|   2 +-
 python/mxnet/contrib/amp/lists/symbol.py   |   6 +-
 python/mxnet/ndarray/ndarray.py|  10 +
 python/mxnet/ndarray/numpy/_op.py  | 124 +++-
 python/mxnet/numpy/multiarray.py   | 128 +++-
 python/mxnet/numpy_dispatch_protocol.py|   5 +-
 python/mxnet/symbol/numpy/_symbol.py   |  90 -
 python/mxnet/symbol/symbol.py  |   7 +-
 src/c_api/c_api.cc |  13 +-
 src/c_api/c_api_ndarray.cc |   8 +-
 src/ndarray/ndarray.cc |  15 ++
 src/ndarray/ndarray_function.cc|   5 +
 src/operator/numpy/np_diff-inl.h   | 220 +
 src/operator/numpy/np_diff.cc  | 109 ++
 .../numpy/{random/np_uniform_op.cu => np_diff.cu}  |  14 +-
 src/operator/numpy/np_memory_op.cc |  62 ++
 .../quantize.cu => numpy/np_memory_op.cu}  |  12 +-
 src/operator/numpy/np_memory_op.h  |  75 +++
 src/operator/tensor/init_op.h  |  30 ++-
 .../python/unittest/test_numpy_interoperability.py |  39 
 tests/python/unittest/test_numpy_op.py |  79 +++-
 tests/python/unittest/test_operator.py |  56 ++
 24 files changed, 1087 insertions(+), 36 deletions(-)
 create mode 100644 src/operator/numpy/np_diff-inl.h
 create mode 100644 src/operator/numpy/np_diff.cc
 copy src/operator/numpy/{random/np_uniform_op.cu => np_diff.cu} (75%)
 create mode 100644 src/operator/numpy/np_memory_op.cc
 copy src/operator/{quantization/quantize.cu => numpy/np_memory_op.cu} (82%)
 create mode 100644 src/operator/numpy/np_memory_op.h



[incubator-mxnet] branch mkldnn-v1.0 updated (b5eb25d -> 109be60)

2019-10-30 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from b5eb25d  disable MKLDNN FC backward
 add 86ed5f5  [NumPy][Operator] NumPy operator `may_share_memory` and 
`shares_memory` (#16533)
 add 60d74bc  Showing proper error message when an attempt is made to 
create large tensor but MXNet is not built with it (#16570)
 add 5aa74e0  Move ops which don't support FP16 dtype to FP32 list (#16668)
 add 8e50fd9  no such method => modified function args (#16610)
 add 27bddf8  [Numpy] Numpy operator diff (#15906)
 add 109be60  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 include/mxnet/c_api.h  |  12 +-
 julia/deps/build.jl|   2 +-
 julia/src/metric.jl|   2 +-
 python/mxnet/contrib/amp/lists/symbol.py   |   6 +-
 python/mxnet/ndarray/ndarray.py|  10 +
 python/mxnet/ndarray/numpy/_op.py  | 124 +++-
 python/mxnet/numpy/multiarray.py   | 128 +++-
 python/mxnet/numpy_dispatch_protocol.py|   5 +-
 python/mxnet/symbol/numpy/_symbol.py   |  90 -
 python/mxnet/symbol/symbol.py  |   7 +-
 src/c_api/c_api.cc |  13 +-
 src/c_api/c_api_ndarray.cc |   8 +-
 src/ndarray/ndarray.cc |  15 ++
 src/ndarray/ndarray_function.cc|   5 +
 src/operator/numpy/np_diff-inl.h   | 220 +
 src/operator/numpy/np_diff.cc  | 109 ++
 .../numpy/{random/np_uniform_op.cu => np_diff.cu}  |  14 +-
 src/operator/numpy/np_memory_op.cc |  62 ++
 .../quantize.cu => numpy/np_memory_op.cu}  |  12 +-
 src/operator/numpy/np_memory_op.h  |  75 +++
 src/operator/tensor/init_op.h  |  30 ++-
 .../python/unittest/test_numpy_interoperability.py |  39 
 tests/python/unittest/test_numpy_op.py |  79 +++-
 tests/python/unittest/test_operator.py |  56 ++
 24 files changed, 1087 insertions(+), 36 deletions(-)
 create mode 100644 src/operator/numpy/np_diff-inl.h
 create mode 100644 src/operator/numpy/np_diff.cc
 copy src/operator/numpy/{random/np_uniform_op.cu => np_diff.cu} (75%)
 create mode 100644 src/operator/numpy/np_memory_op.cc
 copy src/operator/{quantization/quantize.cu => numpy/np_memory_op.cu} (82%)
 create mode 100644 src/operator/numpy/np_memory_op.h



[incubator-mxnet] branch mkldnn-v1.0 updated (e663ce1 -> b5eb25d)

2019-10-30 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from e663ce1  fallback mkldnn fc bwd in imperative mode (#16672)
 add b5eb25d  disable MKLDNN FC backward

No new revisions were added by this update.

Summary of changes:
 src/operator/nn/fully_connected.cc | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)



[incubator-mxnet] branch mkldnn-v1.0 updated (c23568f -> 19038d0)

2019-10-25 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from c23568f  [mkldnn-v1.0]set fc weight layout as mkldnn v0.2x did (#16593)
 add 19038d0  [mkldnn-v1.0] Upgrade to MKL-DNN v1.0.4 patch release (#16592)

No new revisions were added by this update.

Summary of changes:
 3rdparty/mkldnn | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)



[incubator-mxnet] branch mkldnn-v1.0 updated (ca240b2 -> 2210b21)

2019-10-23 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from ca240b2  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0
 add 2210b21  [mkldnn-v1.0] Skip flaky test for unidirectional rnn_relu 
(#16545)

No new revisions were added by this update.

Summary of changes:
 tests/python/unittest/test_operator.py | 24 +++-
 1 file changed, 11 insertions(+), 13 deletions(-)



[incubator-mxnet] branch mkldnn-v1.0 updated (ca240b2 -> 2210b21)

2019-10-23 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from ca240b2  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0
 add 2210b21  [mkldnn-v1.0] Skip flaky test for unidirectional rnn_relu 
(#16545)

No new revisions were added by this update.

Summary of changes:
 tests/python/unittest/test_operator.py | 24 +++-
 1 file changed, 11 insertions(+), 13 deletions(-)



[incubator-mxnet] branch mkldnn-v1.0 updated (d109033 -> ca240b2)

2019-10-23 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from d109033  re-enable unit tests (#16565)
 add 93ec1f2  [Doc] Use mirror link in the download page (#16501)
 add 06ce371  checking broken link fixes work (#16538)
 add 91bb398  [CD] Adds python docker pipeline (#16547)
 add 1fb6f00  Build dmlc-core with old thread_local implementation (#16526)
 add 261d09d  pickler override for np ndarrays (#16561)
 add 62b0638  Added large tensor support and test for gather_nd (#16371)
 add dcf5fc8  fix doc for topk (#16571)
 add 0ba1ce2  [numpy]op test in new pattern (#16556)
 add ca4af0e  Enforce adding documentation for builtin numpy operators 
(#16575)
 add 34e4f71  Move imagenet inference to nightly (#16577)
 add 06b86da  detect number of procs during sphinx build (#16512)
 add 1ffdd47  Make mrcnn_mask_target arg mask_size a 2d tuple (#16567)
 add 20aa10c  split issue templates (#16558)
 add 10941ab  Dgl ops 2 (#16416)
 add b05d72a  RNNOp to call cudaEventCreate lazily (#16584)
 add 5296ddc  add encoding to the stub files for potential utf8 char in doc 
strings (#16580)
 add ca240b2  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 .github/ISSUE_TEMPLATE.md  |  52 ---
 .github/ISSUE_TEMPLATE/bug_report.md   |  36 ++
 .github/ISSUE_TEMPLATE/feature_request.md  |  17 +
 .github/ISSUE_TEMPLATE/flaky_test.md   |  18 +
 3rdparty/dmlc-core |   2 +-
 CMakeLists.txt |   3 +
 Makefile   |   2 +
 cd/Jenkinsfile_cd_pipeline |  14 +-
 cd/Jenkinsfile_release_job |   3 +-
 cd/Jenkinsfile_utils.groovy|  12 +
 .../python/docker/Dockerfile   |  23 +-
 .../python/docker/Dockerfile.test  |  26 +-
 cd/python/{pypi => docker}/Jenkins_pipeline.groovy |  54 ++-
 cd/python/docker/python_images.sh  | 128 ++
 .../python/docker/test_python_image.sh |  38 +-
 cd/python/pypi/Jenkins_pipeline.groovy |   2 +-
 cd/utils/docker_tag.sh |  59 +++
 .../utils/mxnet_base_image.sh  |  42 +-
 ci/build.py| 179 +
 ci/docker/runtime_functions.sh |   8 +
 ci/docker_cache.py |  87 +
 ci/docker_login.py | 137 +++
 .../scripts/get_cifar_data.sh => ci/logging.conf   |  35 +-
 ci/safe_docker_run.py  | 247 
 ci/test_docker_cache.py|  40 +-
 ci/test_docker_login.py| 234 +++
 ci/test_safe_docker_run.py | 427 +
 ci/util.py |  17 +
 cpp-package/tests/ci_test.sh   |   2 -
 docs/python_docs/python/Makefile_sphinx|  20 +-
 .../python/tutorials/deploy/export/onnx.md |   2 +-
 .../performance/backend/mkldnn/mkldnn_readme.md|   2 +-
 .../themes/mx-theme/mxtheme/footer.html|   4 +-
 .../src/_includes/get_started/linux/cpp/cpp.md |   2 +-
 .../src/_includes/get_started/macos/cpp/cpp.md |   2 +-
 .../src/_includes/get_started/macos/scala/cpu.md   |   2 +-
 .../src/_includes/get_started/windows/cpp/cpp.md   |   4 +-
 docs/static_site/src/pages/api/faq/env_var.md  |   2 +-
 docs/static_site/src/pages/features.html   |   2 +-
 docs/static_site/src/pages/get_started/download.md |  18 +-
 .../src/pages/get_started/windows_setup.md |   4 +-
 example/gluon/word_language_model/README.md|   2 +-
 python/mxnet/base.py   |   1 +
 python/mxnet/gluon/block.py|   4 +-
 python/mxnet/gluon/contrib/data/text.py|   4 +-
 python/mxnet/gluon/data/dataloader.py  |  32 ++
 python/mxnet/gluon/trainer.py  |   2 +-
 python/mxnet/ndarray/ndarray.py|   5 +-
 python/mxnet/numpy_dispatch_protocol.py|   8 +
 python/mxnet/test_utils.py |   3 +
 src/operator/contrib/mrcnn_mask_target-inl.h   |   8 +-
 src/operator/contrib/mrcnn_mask_target.cu  |  10 +-
 src/operator/numpy/np_elemwise_broadcast_op.cu |   3 -
 src/operator/rnn-inl.h |   9 +-
 src/operator/tensor/indexing_op.h  |  10 +-
 src/operator/tensor/ordering_op.cc |   5 +-
 tests/cpp/engine/thread_local_test.cc  |  80 
 tests/nightly/JenkinsfileForBi

[incubator-mxnet] branch mkldnn-v1.0 updated (d109033 -> ca240b2)

2019-10-23 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from d109033  re-enable unit tests (#16565)
 add 93ec1f2  [Doc] Use mirror link in the download page (#16501)
 add 06ce371  checking broken link fixes work (#16538)
 add 91bb398  [CD] Adds python docker pipeline (#16547)
 add 1fb6f00  Build dmlc-core with old thread_local implementation (#16526)
 add 261d09d  pickler override for np ndarrays (#16561)
 add 62b0638  Added large tensor support and test for gather_nd (#16371)
 add dcf5fc8  fix doc for topk (#16571)
 add 0ba1ce2  [numpy]op test in new pattern (#16556)
 add ca4af0e  Enforce adding documentation for builtin numpy operators 
(#16575)
 add 34e4f71  Move imagenet inference to nightly (#16577)
 add 06b86da  detect number of procs during sphinx build (#16512)
 add 1ffdd47  Make mrcnn_mask_target arg mask_size a 2d tuple (#16567)
 add 20aa10c  split issue templates (#16558)
 add 10941ab  Dgl ops 2 (#16416)
 add b05d72a  RNNOp to call cudaEventCreate lazily (#16584)
 add 5296ddc  add encoding to the stub files for potential utf8 char in doc 
strings (#16580)
 add ca240b2  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 .github/ISSUE_TEMPLATE.md  |  52 ---
 .github/ISSUE_TEMPLATE/bug_report.md   |  36 ++
 .github/ISSUE_TEMPLATE/feature_request.md  |  17 +
 .github/ISSUE_TEMPLATE/flaky_test.md   |  18 +
 3rdparty/dmlc-core |   2 +-
 CMakeLists.txt |   3 +
 Makefile   |   2 +
 cd/Jenkinsfile_cd_pipeline |  14 +-
 cd/Jenkinsfile_release_job |   3 +-
 cd/Jenkinsfile_utils.groovy|  12 +
 .../python/docker/Dockerfile   |  23 +-
 .../python/docker/Dockerfile.test  |  26 +-
 cd/python/{pypi => docker}/Jenkins_pipeline.groovy |  54 ++-
 cd/python/docker/python_images.sh  | 128 ++
 .../python/docker/test_python_image.sh |  38 +-
 cd/python/pypi/Jenkins_pipeline.groovy |   2 +-
 cd/utils/docker_tag.sh |  59 +++
 .../utils/mxnet_base_image.sh  |  42 +-
 ci/build.py| 179 +
 ci/docker/runtime_functions.sh |   8 +
 ci/docker_cache.py |  87 +
 ci/docker_login.py | 137 +++
 .../scripts/get_cifar_data.sh => ci/logging.conf   |  35 +-
 ci/safe_docker_run.py  | 247 
 ci/test_docker_cache.py|  40 +-
 ci/test_docker_login.py| 234 +++
 ci/test_safe_docker_run.py | 427 +
 ci/util.py |  17 +
 cpp-package/tests/ci_test.sh   |   2 -
 docs/python_docs/python/Makefile_sphinx|  20 +-
 .../python/tutorials/deploy/export/onnx.md |   2 +-
 .../performance/backend/mkldnn/mkldnn_readme.md|   2 +-
 .../themes/mx-theme/mxtheme/footer.html|   4 +-
 .../src/_includes/get_started/linux/cpp/cpp.md |   2 +-
 .../src/_includes/get_started/macos/cpp/cpp.md |   2 +-
 .../src/_includes/get_started/macos/scala/cpu.md   |   2 +-
 .../src/_includes/get_started/windows/cpp/cpp.md   |   4 +-
 docs/static_site/src/pages/api/faq/env_var.md  |   2 +-
 docs/static_site/src/pages/features.html   |   2 +-
 docs/static_site/src/pages/get_started/download.md |  18 +-
 .../src/pages/get_started/windows_setup.md |   4 +-
 example/gluon/word_language_model/README.md|   2 +-
 python/mxnet/base.py   |   1 +
 python/mxnet/gluon/block.py|   4 +-
 python/mxnet/gluon/contrib/data/text.py|   4 +-
 python/mxnet/gluon/data/dataloader.py  |  32 ++
 python/mxnet/gluon/trainer.py  |   2 +-
 python/mxnet/ndarray/ndarray.py|   5 +-
 python/mxnet/numpy_dispatch_protocol.py|   8 +
 python/mxnet/test_utils.py |   3 +
 src/operator/contrib/mrcnn_mask_target-inl.h   |   8 +-
 src/operator/contrib/mrcnn_mask_target.cu  |  10 +-
 src/operator/numpy/np_elemwise_broadcast_op.cu |   3 -
 src/operator/rnn-inl.h |   9 +-
 src/operator/tensor/indexing_op.h  |  10 +-
 src/operator/tensor/ordering_op.cc |   5 +-
 tests/cpp/engine/thread_local_test.cc  |  80 
 tests/nightly/JenkinsfileForBi

[incubator-mxnet] branch mkldnn-v1.0 updated (d109033 -> ca240b2)

2019-10-23 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from d109033  re-enable unit tests (#16565)
 add 93ec1f2  [Doc] Use mirror link in the download page (#16501)
 add 06ce371  checking broken link fixes work (#16538)
 add 91bb398  [CD] Adds python docker pipeline (#16547)
 add 1fb6f00  Build dmlc-core with old thread_local implementation (#16526)
 add 261d09d  pickler override for np ndarrays (#16561)
 add 62b0638  Added large tensor support and test for gather_nd (#16371)
 add dcf5fc8  fix doc for topk (#16571)
 add 0ba1ce2  [numpy]op test in new pattern (#16556)
 add ca4af0e  Enforce adding documentation for builtin numpy operators 
(#16575)
 add 34e4f71  Move imagenet inference to nightly (#16577)
 add 06b86da  detect number of procs during sphinx build (#16512)
 add 1ffdd47  Make mrcnn_mask_target arg mask_size a 2d tuple (#16567)
 add 20aa10c  split issue templates (#16558)
 add 10941ab  Dgl ops 2 (#16416)
 add b05d72a  RNNOp to call cudaEventCreate lazily (#16584)
 add 5296ddc  add encoding to the stub files for potential utf8 char in doc 
strings (#16580)
 add ca240b2  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 .github/ISSUE_TEMPLATE.md  |  52 ---
 .github/ISSUE_TEMPLATE/bug_report.md   |  36 ++
 .github/ISSUE_TEMPLATE/feature_request.md  |  17 +
 .github/ISSUE_TEMPLATE/flaky_test.md   |  18 +
 3rdparty/dmlc-core |   2 +-
 CMakeLists.txt |   3 +
 Makefile   |   2 +
 cd/Jenkinsfile_cd_pipeline |  14 +-
 cd/Jenkinsfile_release_job |   3 +-
 cd/Jenkinsfile_utils.groovy|  12 +
 .../python/docker/Dockerfile   |  23 +-
 .../python/docker/Dockerfile.test  |  26 +-
 cd/python/{pypi => docker}/Jenkins_pipeline.groovy |  54 ++-
 cd/python/docker/python_images.sh  | 128 ++
 .../python/docker/test_python_image.sh |  38 +-
 cd/python/pypi/Jenkins_pipeline.groovy |   2 +-
 cd/utils/docker_tag.sh |  59 +++
 .../utils/mxnet_base_image.sh  |  42 +-
 ci/build.py| 179 +
 ci/docker/runtime_functions.sh |   8 +
 ci/docker_cache.py |  87 +
 ci/docker_login.py | 137 +++
 .../scripts/get_cifar_data.sh => ci/logging.conf   |  35 +-
 ci/safe_docker_run.py  | 247 
 ci/test_docker_cache.py|  40 +-
 ci/test_docker_login.py| 234 +++
 ci/test_safe_docker_run.py | 427 +
 ci/util.py |  17 +
 cpp-package/tests/ci_test.sh   |   2 -
 docs/python_docs/python/Makefile_sphinx|  20 +-
 .../python/tutorials/deploy/export/onnx.md |   2 +-
 .../performance/backend/mkldnn/mkldnn_readme.md|   2 +-
 .../themes/mx-theme/mxtheme/footer.html|   4 +-
 .../src/_includes/get_started/linux/cpp/cpp.md |   2 +-
 .../src/_includes/get_started/macos/cpp/cpp.md |   2 +-
 .../src/_includes/get_started/macos/scala/cpu.md   |   2 +-
 .../src/_includes/get_started/windows/cpp/cpp.md   |   4 +-
 docs/static_site/src/pages/api/faq/env_var.md  |   2 +-
 docs/static_site/src/pages/features.html   |   2 +-
 docs/static_site/src/pages/get_started/download.md |  18 +-
 .../src/pages/get_started/windows_setup.md |   4 +-
 example/gluon/word_language_model/README.md|   2 +-
 python/mxnet/base.py   |   1 +
 python/mxnet/gluon/block.py|   4 +-
 python/mxnet/gluon/contrib/data/text.py|   4 +-
 python/mxnet/gluon/data/dataloader.py  |  32 ++
 python/mxnet/gluon/trainer.py  |   2 +-
 python/mxnet/ndarray/ndarray.py|   5 +-
 python/mxnet/numpy_dispatch_protocol.py|   8 +
 python/mxnet/test_utils.py |   3 +
 src/operator/contrib/mrcnn_mask_target-inl.h   |   8 +-
 src/operator/contrib/mrcnn_mask_target.cu  |  10 +-
 src/operator/numpy/np_elemwise_broadcast_op.cu |   3 -
 src/operator/rnn-inl.h |   9 +-
 src/operator/tensor/indexing_op.h  |  10 +-
 src/operator/tensor/ordering_op.cc |   5 +-
 tests/cpp/engine/thread_local_test.cc  |  80 
 tests/nightly/JenkinsfileForBi

[incubator-mxnet] branch mkldnn-v1.0 updated (d109033 -> ca240b2)

2019-10-23 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from d109033  re-enable unit tests (#16565)
 add 93ec1f2  [Doc] Use mirror link in the download page (#16501)
 add 06ce371  checking broken link fixes work (#16538)
 add 91bb398  [CD] Adds python docker pipeline (#16547)
 add 1fb6f00  Build dmlc-core with old thread_local implementation (#16526)
 add 261d09d  pickler override for np ndarrays (#16561)
 add 62b0638  Added large tensor support and test for gather_nd (#16371)
 add dcf5fc8  fix doc for topk (#16571)
 add 0ba1ce2  [numpy]op test in new pattern (#16556)
 add ca4af0e  Enforce adding documentation for builtin numpy operators 
(#16575)
 add 34e4f71  Move imagenet inference to nightly (#16577)
 add 06b86da  detect number of procs during sphinx build (#16512)
 add 1ffdd47  Make mrcnn_mask_target arg mask_size a 2d tuple (#16567)
 add 20aa10c  split issue templates (#16558)
 add 10941ab  Dgl ops 2 (#16416)
 add b05d72a  RNNOp to call cudaEventCreate lazily (#16584)
 add 5296ddc  add encoding to the stub files for potential utf8 char in doc 
strings (#16580)
 add ca240b2  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 .github/ISSUE_TEMPLATE.md  |  52 ---
 .github/ISSUE_TEMPLATE/bug_report.md   |  36 ++
 .github/ISSUE_TEMPLATE/feature_request.md  |  17 +
 .github/ISSUE_TEMPLATE/flaky_test.md   |  18 +
 3rdparty/dmlc-core |   2 +-
 CMakeLists.txt |   3 +
 Makefile   |   2 +
 cd/Jenkinsfile_cd_pipeline |  14 +-
 cd/Jenkinsfile_release_job |   3 +-
 cd/Jenkinsfile_utils.groovy|  12 +
 .../python/docker/Dockerfile   |  23 +-
 .../python/docker/Dockerfile.test  |  26 +-
 cd/python/{pypi => docker}/Jenkins_pipeline.groovy |  54 ++-
 cd/python/docker/python_images.sh  | 128 ++
 .../python/docker/test_python_image.sh |  38 +-
 cd/python/pypi/Jenkins_pipeline.groovy |   2 +-
 cd/utils/docker_tag.sh |  59 +++
 .../utils/mxnet_base_image.sh  |  42 +-
 ci/build.py| 179 +
 ci/docker/runtime_functions.sh |   8 +
 ci/docker_cache.py |  87 +
 ci/docker_login.py | 137 +++
 .../scripts/get_cifar_data.sh => ci/logging.conf   |  35 +-
 ci/safe_docker_run.py  | 247 
 ci/test_docker_cache.py|  40 +-
 ci/test_docker_login.py| 234 +++
 ci/test_safe_docker_run.py | 427 +
 ci/util.py |  17 +
 cpp-package/tests/ci_test.sh   |   2 -
 docs/python_docs/python/Makefile_sphinx|  20 +-
 .../python/tutorials/deploy/export/onnx.md |   2 +-
 .../performance/backend/mkldnn/mkldnn_readme.md|   2 +-
 .../themes/mx-theme/mxtheme/footer.html|   4 +-
 .../src/_includes/get_started/linux/cpp/cpp.md |   2 +-
 .../src/_includes/get_started/macos/cpp/cpp.md |   2 +-
 .../src/_includes/get_started/macos/scala/cpu.md   |   2 +-
 .../src/_includes/get_started/windows/cpp/cpp.md   |   4 +-
 docs/static_site/src/pages/api/faq/env_var.md  |   2 +-
 docs/static_site/src/pages/features.html   |   2 +-
 docs/static_site/src/pages/get_started/download.md |  18 +-
 .../src/pages/get_started/windows_setup.md |   4 +-
 example/gluon/word_language_model/README.md|   2 +-
 python/mxnet/base.py   |   1 +
 python/mxnet/gluon/block.py|   4 +-
 python/mxnet/gluon/contrib/data/text.py|   4 +-
 python/mxnet/gluon/data/dataloader.py  |  32 ++
 python/mxnet/gluon/trainer.py  |   2 +-
 python/mxnet/ndarray/ndarray.py|   5 +-
 python/mxnet/numpy_dispatch_protocol.py|   8 +
 python/mxnet/test_utils.py |   3 +
 src/operator/contrib/mrcnn_mask_target-inl.h   |   8 +-
 src/operator/contrib/mrcnn_mask_target.cu  |  10 +-
 src/operator/numpy/np_elemwise_broadcast_op.cu |   3 -
 src/operator/rnn-inl.h |   9 +-
 src/operator/tensor/indexing_op.h  |  10 +-
 src/operator/tensor/ordering_op.cc |   5 +-
 tests/cpp/engine/thread_local_test.cc  |  80 
 tests/nightly/JenkinsfileForBi

[incubator-mxnet] branch mkldnn-v1.0 updated (d109033 -> ca240b2)

2019-10-23 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from d109033  re-enable unit tests (#16565)
 add 93ec1f2  [Doc] Use mirror link in the download page (#16501)
 add 06ce371  checking broken link fixes work (#16538)
 add 91bb398  [CD] Adds python docker pipeline (#16547)
 add 1fb6f00  Build dmlc-core with old thread_local implementation (#16526)
 add 261d09d  pickler override for np ndarrays (#16561)
 add 62b0638  Added large tensor support and test for gather_nd (#16371)
 add dcf5fc8  fix doc for topk (#16571)
 add 0ba1ce2  [numpy]op test in new pattern (#16556)
 add ca4af0e  Enforce adding documentation for builtin numpy operators 
(#16575)
 add 34e4f71  Move imagenet inference to nightly (#16577)
 add 06b86da  detect number of procs during sphinx build (#16512)
 add 1ffdd47  Make mrcnn_mask_target arg mask_size a 2d tuple (#16567)
 add 20aa10c  split issue templates (#16558)
 add 10941ab  Dgl ops 2 (#16416)
 add b05d72a  RNNOp to call cudaEventCreate lazily (#16584)
 add 5296ddc  add encoding to the stub files for potential utf8 char in doc 
strings (#16580)
 add ca240b2  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 .github/ISSUE_TEMPLATE.md  |  52 ---
 .github/ISSUE_TEMPLATE/bug_report.md   |  36 ++
 .github/ISSUE_TEMPLATE/feature_request.md  |  17 +
 .github/ISSUE_TEMPLATE/flaky_test.md   |  18 +
 3rdparty/dmlc-core |   2 +-
 CMakeLists.txt |   3 +
 Makefile   |   2 +
 cd/Jenkinsfile_cd_pipeline |  14 +-
 cd/Jenkinsfile_release_job |   3 +-
 cd/Jenkinsfile_utils.groovy|  12 +
 .../python/docker/Dockerfile   |  23 +-
 .../python/docker/Dockerfile.test  |  26 +-
 cd/python/{pypi => docker}/Jenkins_pipeline.groovy |  54 ++-
 cd/python/docker/python_images.sh  | 128 ++
 .../python/docker/test_python_image.sh |  38 +-
 cd/python/pypi/Jenkins_pipeline.groovy |   2 +-
 cd/utils/docker_tag.sh |  59 +++
 .../utils/mxnet_base_image.sh  |  42 +-
 ci/build.py| 179 +
 ci/docker/runtime_functions.sh |   8 +
 ci/docker_cache.py |  87 +
 ci/docker_login.py | 137 +++
 .../scripts/get_cifar_data.sh => ci/logging.conf   |  35 +-
 ci/safe_docker_run.py  | 247 
 ci/test_docker_cache.py|  40 +-
 ci/test_docker_login.py| 234 +++
 ci/test_safe_docker_run.py | 427 +
 ci/util.py |  17 +
 cpp-package/tests/ci_test.sh   |   2 -
 docs/python_docs/python/Makefile_sphinx|  20 +-
 .../python/tutorials/deploy/export/onnx.md |   2 +-
 .../performance/backend/mkldnn/mkldnn_readme.md|   2 +-
 .../themes/mx-theme/mxtheme/footer.html|   4 +-
 .../src/_includes/get_started/linux/cpp/cpp.md |   2 +-
 .../src/_includes/get_started/macos/cpp/cpp.md |   2 +-
 .../src/_includes/get_started/macos/scala/cpu.md   |   2 +-
 .../src/_includes/get_started/windows/cpp/cpp.md   |   4 +-
 docs/static_site/src/pages/api/faq/env_var.md  |   2 +-
 docs/static_site/src/pages/features.html   |   2 +-
 docs/static_site/src/pages/get_started/download.md |  18 +-
 .../src/pages/get_started/windows_setup.md |   4 +-
 example/gluon/word_language_model/README.md|   2 +-
 python/mxnet/base.py   |   1 +
 python/mxnet/gluon/block.py|   4 +-
 python/mxnet/gluon/contrib/data/text.py|   4 +-
 python/mxnet/gluon/data/dataloader.py  |  32 ++
 python/mxnet/gluon/trainer.py  |   2 +-
 python/mxnet/ndarray/ndarray.py|   5 +-
 python/mxnet/numpy_dispatch_protocol.py|   8 +
 python/mxnet/test_utils.py |   3 +
 src/operator/contrib/mrcnn_mask_target-inl.h   |   8 +-
 src/operator/contrib/mrcnn_mask_target.cu  |  10 +-
 src/operator/numpy/np_elemwise_broadcast_op.cu |   3 -
 src/operator/rnn-inl.h |   9 +-
 src/operator/tensor/indexing_op.h  |  10 +-
 src/operator/tensor/ordering_op.cc |   5 +-
 tests/cpp/engine/thread_local_test.cc  |  80 
 tests/nightly/JenkinsfileForBi

[incubator-mxnet] branch mkldnn-v1.0 updated (076a55f -> 2bdfca9)

2019-10-21 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 076a55f  change MXNET_USE_MKLDNN from 100 to 1 (#16551)
 add 9fecfbb  Add test pipeline for USE_TVM_OP=OFF on Unix (#16450)
 add b583059  Numpy dispatch test of .. (#16422)
 add 149e034  typo fix in r doc lstm tutorial (#16546)
 add fc81c64  Correct Google Analytics Tracker (#16490)
 add ffec31f  Aggregated adamw update (#16398)
 add 5b67a69  try to fix block (#16465)
 add c1d02ce  setup and concatenate, copy, expand_dims, expm1 (#16493)
 add cdfaf39  add sum for boolean type in mainline (#16436)
 add 1648f4c  [Numpy] SVD outputs tuple (#16530)
 add 5accae0  numpy op doc: max, min, prod (#16506)
 add b949716  add interface for rand
 add 217ae02  Fix numpy bugs (#16537)
 add 746cbc5  Add unit tests for TensorRT integration and fix some bugs 
(#15399)
 add 2bdfca9  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 3rdparty/mshadow/mshadow/base.h|  10 +
 3rdparty/onnx-tensorrt |   2 +-
 ci/docker/install/tensorrt.sh  |   7 +-
 ci/docker/runtime_functions.sh |  71 ++
 ci/jenkins/Jenkins_steps.groovy|  75 ++
 ci/jenkins/Jenkinsfile_unix_cpu|   4 +-
 ci/jenkins/Jenkinsfile_unix_gpu|   9 +-
 docs/python_docs/_static/google_analytics.js   |   2 +-
 .../pages/api/r/docs/tutorials/multi_dim_lstm.md   |   2 +-
 python/mxnet/_numpy_op_doc.py  | 278 ++--
 python/mxnet/contrib/tensorrt.py   |   2 +
 python/mxnet/gluon/block.py| 110 ++-
 python/mxnet/ndarray/contrib.py|  56 +-
 python/mxnet/ndarray/ndarray.py|  13 +
 python/mxnet/ndarray/numpy/_op.py  |  44 +-
 python/mxnet/ndarray/numpy/linalg.py   |  77 ++-
 python/mxnet/ndarray/numpy/random.py   |  29 +-
 python/mxnet/numpy/linalg.py   |  76 +-
 python/mxnet/numpy/multiarray.py   | 273 +++-
 python/mxnet/numpy/random.py   |  30 +-
 python/mxnet/numpy/utils.py|   4 +-
 python/mxnet/numpy_dispatch_protocol.py|  18 +-
 python/mxnet/symbol/numpy/_symbol.py   |  28 +-
 python/mxnet/symbol/numpy/linalg.py|  64 +-
 python/mxnet/symbol/numpy/random.py|  30 +-
 python/mxnet/util.py   |   7 +-
 src/c_api/c_api_symbolic.cc|  38 +-
 src/common/utils.h |   4 +
 src/operator/contrib/adamw-inl.h   | 368 --
 src/operator/contrib/adamw.cc  | 166 -
 src/operator/contrib/adamw.cu  |  34 +-
 src/operator/mshadow_op.h  |  28 +
 src/operator/mxnet_op.h|  37 +-
 src/operator/numpy/linalg/np_gesvd.cc  |   6 +-
 src/operator/numpy/linalg/np_gesvd.cu  |   4 +-
 src/operator/numpy/np_broadcast_reduce_op.h|   4 +-
 src/operator/numpy/np_true_divide-inl.h| 146 
 src/operator/numpy/np_true_divide.cc   |  40 +-
 src/operator/numpy/np_true_divide.cu   |  10 +-
 src/operator/operator_tune.cc  |   2 +
 src/operator/subgraph/tensorrt/nnvm_to_onnx-inl.h  |  15 +-
 src/operator/subgraph/tensorrt/nnvm_to_onnx.cc |  43 +-
 src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc |   2 +-
 src/operator/subgraph/tensorrt/tensorrt-inl.h  | 119 +++-
 src/operator/subgraph/tensorrt/tensorrt.cc |   2 +-
 src/operator/tensor/broadcast_reduce_op.h  |   2 +-
 tests/python/gpu/test_gluon_gpu.py |  15 +
 tests/python/gpu/test_operator_gpu.py  |   1 +
 tests/python/tensorrt/common.py|  30 -
 tests/python/tensorrt/lenet5_common.py |  31 -
 tests/python/tensorrt/lenet5_train.py  |  12 +-
 tests/python/tensorrt/test_cvnets.py   |   7 +-
 tests/python/tensorrt/test_ops.py  | 527 --
 tests/python/tensorrt/test_resnet18.py |   2 +-
 tests/python/tensorrt/test_tensorrt_batchnorm.py   |  65 --
 tests/python/tensorrt/test_tensorrt_deconv.py  |  63 --
 tests/python/tensorrt/test_tensorrt_lenet5.py  |  28 +-
 tests/python/unittest/test_contrib_optimizer.py| 236 ---
 tests/python/unittest/test_gluon.py|  44 ++
 tests/python/unittest/test_ndarray.py  |  24 +-
 .../python/unittest/test_numpy_interoperability.py | 767 +++--
 tests/python/unittest/test_numpy_op.py | 164

[incubator-mxnet] branch mkldnn-v1.0 updated (076a55f -> 2bdfca9)

2019-10-21 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 076a55f  change MXNET_USE_MKLDNN from 100 to 1 (#16551)
 add 9fecfbb  Add test pipeline for USE_TVM_OP=OFF on Unix (#16450)
 add b583059  Numpy dispatch test of .. (#16422)
 add 149e034  typo fix in r doc lstm tutorial (#16546)
 add fc81c64  Correct Google Analytics Tracker (#16490)
 add ffec31f  Aggregated adamw update (#16398)
 add 5b67a69  try to fix block (#16465)
 add c1d02ce  setup and concatenate, copy, expand_dims, expm1 (#16493)
 add cdfaf39  add sum for boolean type in mainline (#16436)
 add 1648f4c  [Numpy] SVD outputs tuple (#16530)
 add 5accae0  numpy op doc: max, min, prod (#16506)
 add b949716  add interface for rand
 add 217ae02  Fix numpy bugs (#16537)
 add 746cbc5  Add unit tests for TensorRT integration and fix some bugs 
(#15399)
 add 2bdfca9  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 3rdparty/mshadow/mshadow/base.h|  10 +
 3rdparty/onnx-tensorrt |   2 +-
 ci/docker/install/tensorrt.sh  |   7 +-
 ci/docker/runtime_functions.sh |  71 ++
 ci/jenkins/Jenkins_steps.groovy|  75 ++
 ci/jenkins/Jenkinsfile_unix_cpu|   4 +-
 ci/jenkins/Jenkinsfile_unix_gpu|   9 +-
 docs/python_docs/_static/google_analytics.js   |   2 +-
 .../pages/api/r/docs/tutorials/multi_dim_lstm.md   |   2 +-
 python/mxnet/_numpy_op_doc.py  | 278 ++--
 python/mxnet/contrib/tensorrt.py   |   2 +
 python/mxnet/gluon/block.py| 110 ++-
 python/mxnet/ndarray/contrib.py|  56 +-
 python/mxnet/ndarray/ndarray.py|  13 +
 python/mxnet/ndarray/numpy/_op.py  |  44 +-
 python/mxnet/ndarray/numpy/linalg.py   |  77 ++-
 python/mxnet/ndarray/numpy/random.py   |  29 +-
 python/mxnet/numpy/linalg.py   |  76 +-
 python/mxnet/numpy/multiarray.py   | 273 +++-
 python/mxnet/numpy/random.py   |  30 +-
 python/mxnet/numpy/utils.py|   4 +-
 python/mxnet/numpy_dispatch_protocol.py|  18 +-
 python/mxnet/symbol/numpy/_symbol.py   |  28 +-
 python/mxnet/symbol/numpy/linalg.py|  64 +-
 python/mxnet/symbol/numpy/random.py|  30 +-
 python/mxnet/util.py   |   7 +-
 src/c_api/c_api_symbolic.cc|  38 +-
 src/common/utils.h |   4 +
 src/operator/contrib/adamw-inl.h   | 368 --
 src/operator/contrib/adamw.cc  | 166 -
 src/operator/contrib/adamw.cu  |  34 +-
 src/operator/mshadow_op.h  |  28 +
 src/operator/mxnet_op.h|  37 +-
 src/operator/numpy/linalg/np_gesvd.cc  |   6 +-
 src/operator/numpy/linalg/np_gesvd.cu  |   4 +-
 src/operator/numpy/np_broadcast_reduce_op.h|   4 +-
 src/operator/numpy/np_true_divide-inl.h| 146 
 src/operator/numpy/np_true_divide.cc   |  40 +-
 src/operator/numpy/np_true_divide.cu   |  10 +-
 src/operator/operator_tune.cc  |   2 +
 src/operator/subgraph/tensorrt/nnvm_to_onnx-inl.h  |  15 +-
 src/operator/subgraph/tensorrt/nnvm_to_onnx.cc |  43 +-
 src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc |   2 +-
 src/operator/subgraph/tensorrt/tensorrt-inl.h  | 119 +++-
 src/operator/subgraph/tensorrt/tensorrt.cc |   2 +-
 src/operator/tensor/broadcast_reduce_op.h  |   2 +-
 tests/python/gpu/test_gluon_gpu.py |  15 +
 tests/python/gpu/test_operator_gpu.py  |   1 +
 tests/python/tensorrt/common.py|  30 -
 tests/python/tensorrt/lenet5_common.py |  31 -
 tests/python/tensorrt/lenet5_train.py  |  12 +-
 tests/python/tensorrt/test_cvnets.py   |   7 +-
 tests/python/tensorrt/test_ops.py  | 527 --
 tests/python/tensorrt/test_resnet18.py |   2 +-
 tests/python/tensorrt/test_tensorrt_batchnorm.py   |  65 --
 tests/python/tensorrt/test_tensorrt_deconv.py  |  63 --
 tests/python/tensorrt/test_tensorrt_lenet5.py  |  28 +-
 tests/python/unittest/test_contrib_optimizer.py| 236 ---
 tests/python/unittest/test_gluon.py|  44 ++
 tests/python/unittest/test_ndarray.py  |  24 +-
 .../python/unittest/test_numpy_interoperability.py | 767 +++--
 tests/python/unittest/test_numpy_op.py | 164

[incubator-mxnet] branch mkldnn-v1.0 updated (076a55f -> 2bdfca9)

2019-10-21 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 076a55f  change MXNET_USE_MKLDNN from 100 to 1 (#16551)
 add 9fecfbb  Add test pipeline for USE_TVM_OP=OFF on Unix (#16450)
 add b583059  Numpy dispatch test of .. (#16422)
 add 149e034  typo fix in r doc lstm tutorial (#16546)
 add fc81c64  Correct Google Analytics Tracker (#16490)
 add ffec31f  Aggregated adamw update (#16398)
 add 5b67a69  try to fix block (#16465)
 add c1d02ce  setup and concatenate, copy, expand_dims, expm1 (#16493)
 add cdfaf39  add sum for boolean type in mainline (#16436)
 add 1648f4c  [Numpy] SVD outputs tuple (#16530)
 add 5accae0  numpy op doc: max, min, prod (#16506)
 add b949716  add interface for rand
 add 217ae02  Fix numpy bugs (#16537)
 add 746cbc5  Add unit tests for TensorRT integration and fix some bugs 
(#15399)
 add 2bdfca9  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 3rdparty/mshadow/mshadow/base.h|  10 +
 3rdparty/onnx-tensorrt |   2 +-
 ci/docker/install/tensorrt.sh  |   7 +-
 ci/docker/runtime_functions.sh |  71 ++
 ci/jenkins/Jenkins_steps.groovy|  75 ++
 ci/jenkins/Jenkinsfile_unix_cpu|   4 +-
 ci/jenkins/Jenkinsfile_unix_gpu|   9 +-
 docs/python_docs/_static/google_analytics.js   |   2 +-
 .../pages/api/r/docs/tutorials/multi_dim_lstm.md   |   2 +-
 python/mxnet/_numpy_op_doc.py  | 278 ++--
 python/mxnet/contrib/tensorrt.py   |   2 +
 python/mxnet/gluon/block.py| 110 ++-
 python/mxnet/ndarray/contrib.py|  56 +-
 python/mxnet/ndarray/ndarray.py|  13 +
 python/mxnet/ndarray/numpy/_op.py  |  44 +-
 python/mxnet/ndarray/numpy/linalg.py   |  77 ++-
 python/mxnet/ndarray/numpy/random.py   |  29 +-
 python/mxnet/numpy/linalg.py   |  76 +-
 python/mxnet/numpy/multiarray.py   | 273 +++-
 python/mxnet/numpy/random.py   |  30 +-
 python/mxnet/numpy/utils.py|   4 +-
 python/mxnet/numpy_dispatch_protocol.py|  18 +-
 python/mxnet/symbol/numpy/_symbol.py   |  28 +-
 python/mxnet/symbol/numpy/linalg.py|  64 +-
 python/mxnet/symbol/numpy/random.py|  30 +-
 python/mxnet/util.py   |   7 +-
 src/c_api/c_api_symbolic.cc|  38 +-
 src/common/utils.h |   4 +
 src/operator/contrib/adamw-inl.h   | 368 --
 src/operator/contrib/adamw.cc  | 166 -
 src/operator/contrib/adamw.cu  |  34 +-
 src/operator/mshadow_op.h  |  28 +
 src/operator/mxnet_op.h|  37 +-
 src/operator/numpy/linalg/np_gesvd.cc  |   6 +-
 src/operator/numpy/linalg/np_gesvd.cu  |   4 +-
 src/operator/numpy/np_broadcast_reduce_op.h|   4 +-
 src/operator/numpy/np_true_divide-inl.h| 146 
 src/operator/numpy/np_true_divide.cc   |  40 +-
 src/operator/numpy/np_true_divide.cu   |  10 +-
 src/operator/operator_tune.cc  |   2 +
 src/operator/subgraph/tensorrt/nnvm_to_onnx-inl.h  |  15 +-
 src/operator/subgraph/tensorrt/nnvm_to_onnx.cc |  43 +-
 src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc |   2 +-
 src/operator/subgraph/tensorrt/tensorrt-inl.h  | 119 +++-
 src/operator/subgraph/tensorrt/tensorrt.cc |   2 +-
 src/operator/tensor/broadcast_reduce_op.h  |   2 +-
 tests/python/gpu/test_gluon_gpu.py |  15 +
 tests/python/gpu/test_operator_gpu.py  |   1 +
 tests/python/tensorrt/common.py|  30 -
 tests/python/tensorrt/lenet5_common.py |  31 -
 tests/python/tensorrt/lenet5_train.py  |  12 +-
 tests/python/tensorrt/test_cvnets.py   |   7 +-
 tests/python/tensorrt/test_ops.py  | 527 --
 tests/python/tensorrt/test_resnet18.py |   2 +-
 tests/python/tensorrt/test_tensorrt_batchnorm.py   |  65 --
 tests/python/tensorrt/test_tensorrt_deconv.py  |  63 --
 tests/python/tensorrt/test_tensorrt_lenet5.py  |  28 +-
 tests/python/unittest/test_contrib_optimizer.py| 236 ---
 tests/python/unittest/test_gluon.py|  44 ++
 tests/python/unittest/test_ndarray.py  |  24 +-
 .../python/unittest/test_numpy_interoperability.py | 767 +++--
 tests/python/unittest/test_numpy_op.py | 164

[incubator-mxnet] branch mkldnn-v1.0 updated (076a55f -> 2bdfca9)

2019-10-21 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 076a55f  change MXNET_USE_MKLDNN from 100 to 1 (#16551)
 add 9fecfbb  Add test pipeline for USE_TVM_OP=OFF on Unix (#16450)
 add b583059  Numpy dispatch test of .. (#16422)
 add 149e034  typo fix in r doc lstm tutorial (#16546)
 add fc81c64  Correct Google Analytics Tracker (#16490)
 add ffec31f  Aggregated adamw update (#16398)
 add 5b67a69  try to fix block (#16465)
 add c1d02ce  setup and concatenate, copy, expand_dims, expm1 (#16493)
 add cdfaf39  add sum for boolean type in mainline (#16436)
 add 1648f4c  [Numpy] SVD outputs tuple (#16530)
 add 5accae0  numpy op doc: max, min, prod (#16506)
 add b949716  add interface for rand
 add 217ae02  Fix numpy bugs (#16537)
 add 746cbc5  Add unit tests for TensorRT integration and fix some bugs 
(#15399)
 add 2bdfca9  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 3rdparty/mshadow/mshadow/base.h|  10 +
 3rdparty/onnx-tensorrt |   2 +-
 ci/docker/install/tensorrt.sh  |   7 +-
 ci/docker/runtime_functions.sh |  71 ++
 ci/jenkins/Jenkins_steps.groovy|  75 ++
 ci/jenkins/Jenkinsfile_unix_cpu|   4 +-
 ci/jenkins/Jenkinsfile_unix_gpu|   9 +-
 docs/python_docs/_static/google_analytics.js   |   2 +-
 .../pages/api/r/docs/tutorials/multi_dim_lstm.md   |   2 +-
 python/mxnet/_numpy_op_doc.py  | 278 ++--
 python/mxnet/contrib/tensorrt.py   |   2 +
 python/mxnet/gluon/block.py| 110 ++-
 python/mxnet/ndarray/contrib.py|  56 +-
 python/mxnet/ndarray/ndarray.py|  13 +
 python/mxnet/ndarray/numpy/_op.py  |  44 +-
 python/mxnet/ndarray/numpy/linalg.py   |  77 ++-
 python/mxnet/ndarray/numpy/random.py   |  29 +-
 python/mxnet/numpy/linalg.py   |  76 +-
 python/mxnet/numpy/multiarray.py   | 273 +++-
 python/mxnet/numpy/random.py   |  30 +-
 python/mxnet/numpy/utils.py|   4 +-
 python/mxnet/numpy_dispatch_protocol.py|  18 +-
 python/mxnet/symbol/numpy/_symbol.py   |  28 +-
 python/mxnet/symbol/numpy/linalg.py|  64 +-
 python/mxnet/symbol/numpy/random.py|  30 +-
 python/mxnet/util.py   |   7 +-
 src/c_api/c_api_symbolic.cc|  38 +-
 src/common/utils.h |   4 +
 src/operator/contrib/adamw-inl.h   | 368 --
 src/operator/contrib/adamw.cc  | 166 -
 src/operator/contrib/adamw.cu  |  34 +-
 src/operator/mshadow_op.h  |  28 +
 src/operator/mxnet_op.h|  37 +-
 src/operator/numpy/linalg/np_gesvd.cc  |   6 +-
 src/operator/numpy/linalg/np_gesvd.cu  |   4 +-
 src/operator/numpy/np_broadcast_reduce_op.h|   4 +-
 src/operator/numpy/np_true_divide-inl.h| 146 
 src/operator/numpy/np_true_divide.cc   |  40 +-
 src/operator/numpy/np_true_divide.cu   |  10 +-
 src/operator/operator_tune.cc  |   2 +
 src/operator/subgraph/tensorrt/nnvm_to_onnx-inl.h  |  15 +-
 src/operator/subgraph/tensorrt/nnvm_to_onnx.cc |  43 +-
 src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc |   2 +-
 src/operator/subgraph/tensorrt/tensorrt-inl.h  | 119 +++-
 src/operator/subgraph/tensorrt/tensorrt.cc |   2 +-
 src/operator/tensor/broadcast_reduce_op.h  |   2 +-
 tests/python/gpu/test_gluon_gpu.py |  15 +
 tests/python/gpu/test_operator_gpu.py  |   1 +
 tests/python/tensorrt/common.py|  30 -
 tests/python/tensorrt/lenet5_common.py |  31 -
 tests/python/tensorrt/lenet5_train.py  |  12 +-
 tests/python/tensorrt/test_cvnets.py   |   7 +-
 tests/python/tensorrt/test_ops.py  | 527 --
 tests/python/tensorrt/test_resnet18.py |   2 +-
 tests/python/tensorrt/test_tensorrt_batchnorm.py   |  65 --
 tests/python/tensorrt/test_tensorrt_deconv.py  |  63 --
 tests/python/tensorrt/test_tensorrt_lenet5.py  |  28 +-
 tests/python/unittest/test_contrib_optimizer.py| 236 ---
 tests/python/unittest/test_gluon.py|  44 ++
 tests/python/unittest/test_ndarray.py  |  24 +-
 .../python/unittest/test_numpy_interoperability.py | 767 +++--
 tests/python/unittest/test_numpy_op.py | 164

[incubator-mxnet] branch mkldnn-v1.0 updated (076a55f -> 2bdfca9)

2019-10-21 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 076a55f  change MXNET_USE_MKLDNN from 100 to 1 (#16551)
 add 9fecfbb  Add test pipeline for USE_TVM_OP=OFF on Unix (#16450)
 add b583059  Numpy dispatch test of .. (#16422)
 add 149e034  typo fix in r doc lstm tutorial (#16546)
 add fc81c64  Correct Google Analytics Tracker (#16490)
 add ffec31f  Aggregated adamw update (#16398)
 add 5b67a69  try to fix block (#16465)
 add c1d02ce  setup and concatenate, copy, expand_dims, expm1 (#16493)
 add cdfaf39  add sum for boolean type in mainline (#16436)
 add 1648f4c  [Numpy] SVD outputs tuple (#16530)
 add 5accae0  numpy op doc: max, min, prod (#16506)
 add b949716  add interface for rand
 add 217ae02  Fix numpy bugs (#16537)
 add 746cbc5  Add unit tests for TensorRT integration and fix some bugs 
(#15399)
 add 2bdfca9  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 3rdparty/mshadow/mshadow/base.h|  10 +
 3rdparty/onnx-tensorrt |   2 +-
 ci/docker/install/tensorrt.sh  |   7 +-
 ci/docker/runtime_functions.sh |  71 ++
 ci/jenkins/Jenkins_steps.groovy|  75 ++
 ci/jenkins/Jenkinsfile_unix_cpu|   4 +-
 ci/jenkins/Jenkinsfile_unix_gpu|   9 +-
 docs/python_docs/_static/google_analytics.js   |   2 +-
 .../pages/api/r/docs/tutorials/multi_dim_lstm.md   |   2 +-
 python/mxnet/_numpy_op_doc.py  | 278 ++--
 python/mxnet/contrib/tensorrt.py   |   2 +
 python/mxnet/gluon/block.py| 110 ++-
 python/mxnet/ndarray/contrib.py|  56 +-
 python/mxnet/ndarray/ndarray.py|  13 +
 python/mxnet/ndarray/numpy/_op.py  |  44 +-
 python/mxnet/ndarray/numpy/linalg.py   |  77 ++-
 python/mxnet/ndarray/numpy/random.py   |  29 +-
 python/mxnet/numpy/linalg.py   |  76 +-
 python/mxnet/numpy/multiarray.py   | 273 +++-
 python/mxnet/numpy/random.py   |  30 +-
 python/mxnet/numpy/utils.py|   4 +-
 python/mxnet/numpy_dispatch_protocol.py|  18 +-
 python/mxnet/symbol/numpy/_symbol.py   |  28 +-
 python/mxnet/symbol/numpy/linalg.py|  64 +-
 python/mxnet/symbol/numpy/random.py|  30 +-
 python/mxnet/util.py   |   7 +-
 src/c_api/c_api_symbolic.cc|  38 +-
 src/common/utils.h |   4 +
 src/operator/contrib/adamw-inl.h   | 368 --
 src/operator/contrib/adamw.cc  | 166 -
 src/operator/contrib/adamw.cu  |  34 +-
 src/operator/mshadow_op.h  |  28 +
 src/operator/mxnet_op.h|  37 +-
 src/operator/numpy/linalg/np_gesvd.cc  |   6 +-
 src/operator/numpy/linalg/np_gesvd.cu  |   4 +-
 src/operator/numpy/np_broadcast_reduce_op.h|   4 +-
 src/operator/numpy/np_true_divide-inl.h| 146 
 src/operator/numpy/np_true_divide.cc   |  40 +-
 src/operator/numpy/np_true_divide.cu   |  10 +-
 src/operator/operator_tune.cc  |   2 +
 src/operator/subgraph/tensorrt/nnvm_to_onnx-inl.h  |  15 +-
 src/operator/subgraph/tensorrt/nnvm_to_onnx.cc |  43 +-
 src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc |   2 +-
 src/operator/subgraph/tensorrt/tensorrt-inl.h  | 119 +++-
 src/operator/subgraph/tensorrt/tensorrt.cc |   2 +-
 src/operator/tensor/broadcast_reduce_op.h  |   2 +-
 tests/python/gpu/test_gluon_gpu.py |  15 +
 tests/python/gpu/test_operator_gpu.py  |   1 +
 tests/python/tensorrt/common.py|  30 -
 tests/python/tensorrt/lenet5_common.py |  31 -
 tests/python/tensorrt/lenet5_train.py  |  12 +-
 tests/python/tensorrt/test_cvnets.py   |   7 +-
 tests/python/tensorrt/test_ops.py  | 527 --
 tests/python/tensorrt/test_resnet18.py |   2 +-
 tests/python/tensorrt/test_tensorrt_batchnorm.py   |  65 --
 tests/python/tensorrt/test_tensorrt_deconv.py  |  63 --
 tests/python/tensorrt/test_tensorrt_lenet5.py  |  28 +-
 tests/python/unittest/test_contrib_optimizer.py| 236 ---
 tests/python/unittest/test_gluon.py|  44 ++
 tests/python/unittest/test_ndarray.py  |  24 +-
 .../python/unittest/test_numpy_interoperability.py | 767 +++--
 tests/python/unittest/test_numpy_op.py | 164

[incubator-mxnet] branch mkldnn-v1.0 updated (61132d0 -> fe1b1ea)

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 61132d0  remove MKL license (#16534)
 add bf57ff8  added more tests to verify support for large vector (#16477)
 add de524bb  Fixing broken links (#16500)
 add a4ea4a8  Load NDArray only to GPU if GPU is present (#16432)
 add d1200c9  add binary and docs build command options (#16514)
 add e4f8c50  [MKLDNN] Fix uint quantized fc when not fusing with 
requantize (#16523)
 add f6cfbdf  improve unary and binary operator handling and refactor tests 
(#16423)
 add 77e9898  Bug fix for the input of same axes of the swapaxes operator 
(#16513)
 add f2ed1d4  added support for large tensors for Dropout operator and 
tests to verify support for more operators (#16409)
 add 63fbfb1  [DOC] Fix numpy op doc  (#16504)
 add f01bcaa  [Numpy] More numpy dispatch tests (#16426)
 add 27f7082  Fix learning rate scheduler being unexpectedly overwritten by 
optimizer's default value (#16487)
 add 73bff7d  adding large tensor support for add_n and tests for more ops 
(#16476)
 add 4b8a95f  add option to remove indexes (#16525)
 add 32bb374  disable tests (#16536)
 add efa5369  adding large tensor support for pad operator (#15126)
 add 2d4c3a4  fix pylint in CI (#16540)
 add 27b3e52  image crop gpu (#16464)
 add a75ec06  [Numpy] einsum (#15911)
 new fe1b1ea  Merge branch 'master' of 
https://github.com/apache/incubator-mxnet into mkldnn-v1.0

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .travis.yml|4 +-
 Makefile   |2 +-
 benchmark/python/einsum/benchmark_einsum.py|   78 ++
 ci/docker/runtime_functions.sh |4 +-
 dev_menu.py|   20 +-
 .../python/tutorials/deploy/export/onnx.md |4 +-
 .../python/tutorials/deploy/run-on-aws/cloud.md|   29 +
 .../python/tutorials/deploy/run-on-aws/cloud.rst   |  105 --
 .../python/tutorials/deploy/run-on-aws/index.rst   |2 +-
 .../gluon_from_experiment_to_deployment.md |4 +-
 .../tutorials/getting-started/to-mxnet/index.rst   |2 +-
 .../python/tutorials/packages/autograd/index.md|   10 +-
 .../gluon/blocks/custom_layer_beginners.md |   30 +-
 .../tutorials/packages/gluon/blocks/hybridize.md   |   12 +-
 .../python/tutorials/packages/gluon/blocks/init.md |   12 +-
 .../python/tutorials/packages/gluon/blocks/nn.md   |   24 +-
 .../tutorials/packages/gluon/blocks/parameters.md  |   20 +-
 .../packages/gluon/image/image-augmentation.md |   20 +-
 .../packages/gluon/image/pretrained_models.md  |   14 +-
 .../python/tutorials/packages/gluon/index.rst  |   12 +-
 .../tutorials/packages/gluon/loss/custom-loss.md   |   28 +-
 .../python/tutorials/packages/gluon/loss/loss.md   |   52 +-
 .../packages/gluon/training/fit_api_tutorial.md|   68 +-
 .../learning_rates/learning_rate_finder.md |   18 +-
 .../learning_rate_schedules_advanced.md|   22 +-
 .../tutorials/packages/gluon/training/trainer.md   |   24 +-
 .../python/tutorials/packages/kvstore/index.rst|7 +-
 .../python/tutorials/packages/kvstore/kvstore.md   |4 +-
 .../tutorials/packages/ndarray/01-ndarray-intro.md |   14 +-
 .../packages/ndarray/02-ndarray-operations.md  |   39 +-
 .../packages/ndarray/03-ndarray-contexts.md|4 +-
 .../packages/ndarray/gotchas_numpy_in_mxnet.md |   38 +-
 .../packages/ndarray/sparse/row_sparse.md  |   28 +-
 .../tutorials/packages/onnx/fine_tuning_gluon.md   |   12 +-
 .../packages/onnx/inference_on_onnx_model.md   |2 +-
 .../tutorials/packages/onnx/super_resolution.md|4 +-
 docs/static_site/src/.htaccess |1 +
 .../src/pages/api/faq/distributed_training.md  |4 +-
 .../docs/tutorials/five_minutes_neural_network.md  |2 +-
 python/mxnet/_numpy_op_doc.py  |   69 ++
 python/mxnet/ndarray/numpy/_op.py  |  599 +--
 python/mxnet/numpy/multiarray.py   |  582 +--
 python/mxnet/numpy/stride_tricks.py|2 +-
 python/mxnet/numpy_dispatch_protocol.py|4 +
 python/mxnet/optimizer/optimizer.py|   20 +-
 python/mxnet/symbol/numpy/_symbol.py   |  362 +--
 python/mxnet/util.py   |  106 ++
 src/ndarray/ndarray.cc |8 +-
 src/operator/contrib/bilinear_resize-inl.cuh   |8 +-
 src/operator/image/crop-inl.h  |  

[incubator-mxnet] 01/01: Merge branch 'master' of https://github.com/apache/incubator-mxnet into mkldnn-v1.0

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git

commit fe1b1ea274610bf43f46ddc950f7724c1b8731f0
Merge: 61132d0 a75ec06
Author: Tao Lv 
AuthorDate: Sun Oct 20 07:24:55 2019 +0800

Merge branch 'master' of https://github.com/apache/incubator-mxnet into 
mkldnn-v1.0

 .travis.yml|4 +-
 Makefile   |2 +-
 benchmark/python/einsum/benchmark_einsum.py|   78 ++
 ci/docker/runtime_functions.sh |4 +-
 dev_menu.py|   20 +-
 .../python/tutorials/deploy/export/onnx.md |4 +-
 .../python/tutorials/deploy/run-on-aws/cloud.md|   29 +
 .../python/tutorials/deploy/run-on-aws/cloud.rst   |  105 --
 .../python/tutorials/deploy/run-on-aws/index.rst   |2 +-
 .../gluon_from_experiment_to_deployment.md |4 +-
 .../tutorials/getting-started/to-mxnet/index.rst   |2 +-
 .../python/tutorials/packages/autograd/index.md|   10 +-
 .../gluon/blocks/custom_layer_beginners.md |   30 +-
 .../tutorials/packages/gluon/blocks/hybridize.md   |   12 +-
 .../python/tutorials/packages/gluon/blocks/init.md |   12 +-
 .../python/tutorials/packages/gluon/blocks/nn.md   |   24 +-
 .../tutorials/packages/gluon/blocks/parameters.md  |   20 +-
 .../packages/gluon/image/image-augmentation.md |   20 +-
 .../packages/gluon/image/pretrained_models.md  |   14 +-
 .../python/tutorials/packages/gluon/index.rst  |   12 +-
 .../tutorials/packages/gluon/loss/custom-loss.md   |   28 +-
 .../python/tutorials/packages/gluon/loss/loss.md   |   52 +-
 .../packages/gluon/training/fit_api_tutorial.md|   68 +-
 .../learning_rates/learning_rate_finder.md |   18 +-
 .../learning_rate_schedules_advanced.md|   22 +-
 .../tutorials/packages/gluon/training/trainer.md   |   24 +-
 .../python/tutorials/packages/kvstore/index.rst|7 +-
 .../python/tutorials/packages/kvstore/kvstore.md   |4 +-
 .../tutorials/packages/ndarray/01-ndarray-intro.md |   14 +-
 .../packages/ndarray/02-ndarray-operations.md  |   39 +-
 .../packages/ndarray/03-ndarray-contexts.md|4 +-
 .../packages/ndarray/gotchas_numpy_in_mxnet.md |   38 +-
 .../packages/ndarray/sparse/row_sparse.md  |   28 +-
 .../tutorials/packages/onnx/fine_tuning_gluon.md   |   12 +-
 .../packages/onnx/inference_on_onnx_model.md   |2 +-
 .../tutorials/packages/onnx/super_resolution.md|4 +-
 docs/static_site/src/.htaccess |1 +
 .../src/pages/api/faq/distributed_training.md  |4 +-
 .../docs/tutorials/five_minutes_neural_network.md  |2 +-
 python/mxnet/_numpy_op_doc.py  |   69 ++
 python/mxnet/ndarray/numpy/_op.py  |  599 +--
 python/mxnet/numpy/multiarray.py   |  582 +--
 python/mxnet/numpy/stride_tricks.py|2 +-
 python/mxnet/numpy_dispatch_protocol.py|4 +
 python/mxnet/optimizer/optimizer.py|   20 +-
 python/mxnet/symbol/numpy/_symbol.py   |  362 +--
 python/mxnet/util.py   |  106 ++
 src/ndarray/ndarray.cc |8 +-
 src/operator/contrib/bilinear_resize-inl.cuh   |8 +-
 src/operator/image/crop-inl.h  |   22 +-
 src/operator/image/crop.cc |4 +-
 src/operator/image/crop.cu |   34 +
 src/operator/mxnet_op.h|   36 +
 src/operator/nn/dropout-inl.h  |   12 +-
 src/operator/numpy/np_dot-inl.h|   16 +-
 src/operator/numpy/np_dot.cc   |4 +-
 src/operator/numpy/np_einsum_op-inl.h  | 1092 
 src/operator/numpy/np_einsum_op.cc |  370 +++
 src/operator/numpy/np_einsum_op.cu |   36 +
 src/operator/numpy/np_einsum_path_op-inl.h |  964 +
 src/operator/numpy/np_elemwise_broadcast_op.cc |2 +
 src/operator/numpy/np_tensordot_op-inl.h   |  120 ++-
 src/operator/numpy/np_tensordot_op.cc  |8 +-
 src/operator/pad.cc|  326 +++---
 .../mkldnn/mkldnn_quantized_fully_connected.cc |9 +-
 src/operator/subgraph/mkldnn/mkldnn_fc.cc  |   12 +-
 src/operator/swapaxis-inl.h|   12 +-
 .../tensor/elemwise_binary_broadcast_op-inl.cuh|   18 +-
 src/operator/tensor/elemwise_binary_broadcast_op.h |5 +-
 src/operator/tensor/elemwise_sum.h |8 +-
 tests/nightly/test_large_array.py  |  304 ++
 tests/nightly/test_large_vector.py |  222 
 tests/python/gpu/test_gluon_transforms.py  |   74

[incubator-mxnet] branch mkldnn-v1.0 updated (61132d0 -> fe1b1ea)

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 61132d0  remove MKL license (#16534)
 add bf57ff8  added more tests to verify support for large vector (#16477)
 add de524bb  Fixing broken links (#16500)
 add a4ea4a8  Load NDArray only to GPU if GPU is present (#16432)
 add d1200c9  add binary and docs build command options (#16514)
 add e4f8c50  [MKLDNN] Fix uint quantized fc when not fusing with 
requantize (#16523)
 add f6cfbdf  improve unary and binary operator handling and refactor tests 
(#16423)
 add 77e9898  Bug fix for the input of same axes of the swapaxes operator 
(#16513)
 add f2ed1d4  added support for large tensors for Dropout operator and 
tests to verify support for more operators (#16409)
 add 63fbfb1  [DOC] Fix numpy op doc  (#16504)
 add f01bcaa  [Numpy] More numpy dispatch tests (#16426)
 add 27f7082  Fix learning rate scheduler being unexpectedly overwritten by 
optimizer's default value (#16487)
 add 73bff7d  adding large tensor support for add_n and tests for more ops 
(#16476)
 add 4b8a95f  add option to remove indexes (#16525)
 add 32bb374  disable tests (#16536)
 add efa5369  adding large tensor support for pad operator (#15126)
 add 2d4c3a4  fix pylint in CI (#16540)
 add 27b3e52  image crop gpu (#16464)
 add a75ec06  [Numpy] einsum (#15911)
 new fe1b1ea  Merge branch 'master' of 
https://github.com/apache/incubator-mxnet into mkldnn-v1.0

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .travis.yml|4 +-
 Makefile   |2 +-
 benchmark/python/einsum/benchmark_einsum.py|   78 ++
 ci/docker/runtime_functions.sh |4 +-
 dev_menu.py|   20 +-
 .../python/tutorials/deploy/export/onnx.md |4 +-
 .../python/tutorials/deploy/run-on-aws/cloud.md|   29 +
 .../python/tutorials/deploy/run-on-aws/cloud.rst   |  105 --
 .../python/tutorials/deploy/run-on-aws/index.rst   |2 +-
 .../gluon_from_experiment_to_deployment.md |4 +-
 .../tutorials/getting-started/to-mxnet/index.rst   |2 +-
 .../python/tutorials/packages/autograd/index.md|   10 +-
 .../gluon/blocks/custom_layer_beginners.md |   30 +-
 .../tutorials/packages/gluon/blocks/hybridize.md   |   12 +-
 .../python/tutorials/packages/gluon/blocks/init.md |   12 +-
 .../python/tutorials/packages/gluon/blocks/nn.md   |   24 +-
 .../tutorials/packages/gluon/blocks/parameters.md  |   20 +-
 .../packages/gluon/image/image-augmentation.md |   20 +-
 .../packages/gluon/image/pretrained_models.md  |   14 +-
 .../python/tutorials/packages/gluon/index.rst  |   12 +-
 .../tutorials/packages/gluon/loss/custom-loss.md   |   28 +-
 .../python/tutorials/packages/gluon/loss/loss.md   |   52 +-
 .../packages/gluon/training/fit_api_tutorial.md|   68 +-
 .../learning_rates/learning_rate_finder.md |   18 +-
 .../learning_rate_schedules_advanced.md|   22 +-
 .../tutorials/packages/gluon/training/trainer.md   |   24 +-
 .../python/tutorials/packages/kvstore/index.rst|7 +-
 .../python/tutorials/packages/kvstore/kvstore.md   |4 +-
 .../tutorials/packages/ndarray/01-ndarray-intro.md |   14 +-
 .../packages/ndarray/02-ndarray-operations.md  |   39 +-
 .../packages/ndarray/03-ndarray-contexts.md|4 +-
 .../packages/ndarray/gotchas_numpy_in_mxnet.md |   38 +-
 .../packages/ndarray/sparse/row_sparse.md  |   28 +-
 .../tutorials/packages/onnx/fine_tuning_gluon.md   |   12 +-
 .../packages/onnx/inference_on_onnx_model.md   |2 +-
 .../tutorials/packages/onnx/super_resolution.md|4 +-
 docs/static_site/src/.htaccess |1 +
 .../src/pages/api/faq/distributed_training.md  |4 +-
 .../docs/tutorials/five_minutes_neural_network.md  |2 +-
 python/mxnet/_numpy_op_doc.py  |   69 ++
 python/mxnet/ndarray/numpy/_op.py  |  599 +--
 python/mxnet/numpy/multiarray.py   |  582 +--
 python/mxnet/numpy/stride_tricks.py|2 +-
 python/mxnet/numpy_dispatch_protocol.py|4 +
 python/mxnet/optimizer/optimizer.py|   20 +-
 python/mxnet/symbol/numpy/_symbol.py   |  362 +--
 python/mxnet/util.py   |  106 ++
 src/ndarray/ndarray.cc |8 +-
 src/operator/contrib/bilinear_resize-inl.cuh   |8 +-
 src/operator/image/crop-inl.h  |  

[incubator-mxnet] 01/01: Merge branch 'master' of https://github.com/apache/incubator-mxnet into mkldnn-v1.0

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git

commit fe1b1ea274610bf43f46ddc950f7724c1b8731f0
Merge: 61132d0 a75ec06
Author: Tao Lv 
AuthorDate: Sun Oct 20 07:24:55 2019 +0800

Merge branch 'master' of https://github.com/apache/incubator-mxnet into 
mkldnn-v1.0

 .travis.yml|4 +-
 Makefile   |2 +-
 benchmark/python/einsum/benchmark_einsum.py|   78 ++
 ci/docker/runtime_functions.sh |4 +-
 dev_menu.py|   20 +-
 .../python/tutorials/deploy/export/onnx.md |4 +-
 .../python/tutorials/deploy/run-on-aws/cloud.md|   29 +
 .../python/tutorials/deploy/run-on-aws/cloud.rst   |  105 --
 .../python/tutorials/deploy/run-on-aws/index.rst   |2 +-
 .../gluon_from_experiment_to_deployment.md |4 +-
 .../tutorials/getting-started/to-mxnet/index.rst   |2 +-
 .../python/tutorials/packages/autograd/index.md|   10 +-
 .../gluon/blocks/custom_layer_beginners.md |   30 +-
 .../tutorials/packages/gluon/blocks/hybridize.md   |   12 +-
 .../python/tutorials/packages/gluon/blocks/init.md |   12 +-
 .../python/tutorials/packages/gluon/blocks/nn.md   |   24 +-
 .../tutorials/packages/gluon/blocks/parameters.md  |   20 +-
 .../packages/gluon/image/image-augmentation.md |   20 +-
 .../packages/gluon/image/pretrained_models.md  |   14 +-
 .../python/tutorials/packages/gluon/index.rst  |   12 +-
 .../tutorials/packages/gluon/loss/custom-loss.md   |   28 +-
 .../python/tutorials/packages/gluon/loss/loss.md   |   52 +-
 .../packages/gluon/training/fit_api_tutorial.md|   68 +-
 .../learning_rates/learning_rate_finder.md |   18 +-
 .../learning_rate_schedules_advanced.md|   22 +-
 .../tutorials/packages/gluon/training/trainer.md   |   24 +-
 .../python/tutorials/packages/kvstore/index.rst|7 +-
 .../python/tutorials/packages/kvstore/kvstore.md   |4 +-
 .../tutorials/packages/ndarray/01-ndarray-intro.md |   14 +-
 .../packages/ndarray/02-ndarray-operations.md  |   39 +-
 .../packages/ndarray/03-ndarray-contexts.md|4 +-
 .../packages/ndarray/gotchas_numpy_in_mxnet.md |   38 +-
 .../packages/ndarray/sparse/row_sparse.md  |   28 +-
 .../tutorials/packages/onnx/fine_tuning_gluon.md   |   12 +-
 .../packages/onnx/inference_on_onnx_model.md   |2 +-
 .../tutorials/packages/onnx/super_resolution.md|4 +-
 docs/static_site/src/.htaccess |1 +
 .../src/pages/api/faq/distributed_training.md  |4 +-
 .../docs/tutorials/five_minutes_neural_network.md  |2 +-
 python/mxnet/_numpy_op_doc.py  |   69 ++
 python/mxnet/ndarray/numpy/_op.py  |  599 +--
 python/mxnet/numpy/multiarray.py   |  582 +--
 python/mxnet/numpy/stride_tricks.py|2 +-
 python/mxnet/numpy_dispatch_protocol.py|4 +
 python/mxnet/optimizer/optimizer.py|   20 +-
 python/mxnet/symbol/numpy/_symbol.py   |  362 +--
 python/mxnet/util.py   |  106 ++
 src/ndarray/ndarray.cc |8 +-
 src/operator/contrib/bilinear_resize-inl.cuh   |8 +-
 src/operator/image/crop-inl.h  |   22 +-
 src/operator/image/crop.cc |4 +-
 src/operator/image/crop.cu |   34 +
 src/operator/mxnet_op.h|   36 +
 src/operator/nn/dropout-inl.h  |   12 +-
 src/operator/numpy/np_dot-inl.h|   16 +-
 src/operator/numpy/np_dot.cc   |4 +-
 src/operator/numpy/np_einsum_op-inl.h  | 1092 
 src/operator/numpy/np_einsum_op.cc |  370 +++
 src/operator/numpy/np_einsum_op.cu |   36 +
 src/operator/numpy/np_einsum_path_op-inl.h |  964 +
 src/operator/numpy/np_elemwise_broadcast_op.cc |2 +
 src/operator/numpy/np_tensordot_op-inl.h   |  120 ++-
 src/operator/numpy/np_tensordot_op.cc  |8 +-
 src/operator/pad.cc|  326 +++---
 .../mkldnn/mkldnn_quantized_fully_connected.cc |9 +-
 src/operator/subgraph/mkldnn/mkldnn_fc.cc  |   12 +-
 src/operator/swapaxis-inl.h|   12 +-
 .../tensor/elemwise_binary_broadcast_op-inl.cuh|   18 +-
 src/operator/tensor/elemwise_binary_broadcast_op.h |5 +-
 src/operator/tensor/elemwise_sum.h |8 +-
 tests/nightly/test_large_array.py  |  304 ++
 tests/nightly/test_large_vector.py |  222 
 tests/python/gpu/test_gluon_transforms.py  |   74

[incubator-mxnet] 01/01: Merge branch 'master' of https://github.com/apache/incubator-mxnet into mkldnn-v1.0

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git

commit fe1b1ea274610bf43f46ddc950f7724c1b8731f0
Merge: 61132d0 a75ec06
Author: Tao Lv 
AuthorDate: Sun Oct 20 07:24:55 2019 +0800

Merge branch 'master' of https://github.com/apache/incubator-mxnet into 
mkldnn-v1.0

 .travis.yml|4 +-
 Makefile   |2 +-
 benchmark/python/einsum/benchmark_einsum.py|   78 ++
 ci/docker/runtime_functions.sh |4 +-
 dev_menu.py|   20 +-
 .../python/tutorials/deploy/export/onnx.md |4 +-
 .../python/tutorials/deploy/run-on-aws/cloud.md|   29 +
 .../python/tutorials/deploy/run-on-aws/cloud.rst   |  105 --
 .../python/tutorials/deploy/run-on-aws/index.rst   |2 +-
 .../gluon_from_experiment_to_deployment.md |4 +-
 .../tutorials/getting-started/to-mxnet/index.rst   |2 +-
 .../python/tutorials/packages/autograd/index.md|   10 +-
 .../gluon/blocks/custom_layer_beginners.md |   30 +-
 .../tutorials/packages/gluon/blocks/hybridize.md   |   12 +-
 .../python/tutorials/packages/gluon/blocks/init.md |   12 +-
 .../python/tutorials/packages/gluon/blocks/nn.md   |   24 +-
 .../tutorials/packages/gluon/blocks/parameters.md  |   20 +-
 .../packages/gluon/image/image-augmentation.md |   20 +-
 .../packages/gluon/image/pretrained_models.md  |   14 +-
 .../python/tutorials/packages/gluon/index.rst  |   12 +-
 .../tutorials/packages/gluon/loss/custom-loss.md   |   28 +-
 .../python/tutorials/packages/gluon/loss/loss.md   |   52 +-
 .../packages/gluon/training/fit_api_tutorial.md|   68 +-
 .../learning_rates/learning_rate_finder.md |   18 +-
 .../learning_rate_schedules_advanced.md|   22 +-
 .../tutorials/packages/gluon/training/trainer.md   |   24 +-
 .../python/tutorials/packages/kvstore/index.rst|7 +-
 .../python/tutorials/packages/kvstore/kvstore.md   |4 +-
 .../tutorials/packages/ndarray/01-ndarray-intro.md |   14 +-
 .../packages/ndarray/02-ndarray-operations.md  |   39 +-
 .../packages/ndarray/03-ndarray-contexts.md|4 +-
 .../packages/ndarray/gotchas_numpy_in_mxnet.md |   38 +-
 .../packages/ndarray/sparse/row_sparse.md  |   28 +-
 .../tutorials/packages/onnx/fine_tuning_gluon.md   |   12 +-
 .../packages/onnx/inference_on_onnx_model.md   |2 +-
 .../tutorials/packages/onnx/super_resolution.md|4 +-
 docs/static_site/src/.htaccess |1 +
 .../src/pages/api/faq/distributed_training.md  |4 +-
 .../docs/tutorials/five_minutes_neural_network.md  |2 +-
 python/mxnet/_numpy_op_doc.py  |   69 ++
 python/mxnet/ndarray/numpy/_op.py  |  599 +--
 python/mxnet/numpy/multiarray.py   |  582 +--
 python/mxnet/numpy/stride_tricks.py|2 +-
 python/mxnet/numpy_dispatch_protocol.py|4 +
 python/mxnet/optimizer/optimizer.py|   20 +-
 python/mxnet/symbol/numpy/_symbol.py   |  362 +--
 python/mxnet/util.py   |  106 ++
 src/ndarray/ndarray.cc |8 +-
 src/operator/contrib/bilinear_resize-inl.cuh   |8 +-
 src/operator/image/crop-inl.h  |   22 +-
 src/operator/image/crop.cc |4 +-
 src/operator/image/crop.cu |   34 +
 src/operator/mxnet_op.h|   36 +
 src/operator/nn/dropout-inl.h  |   12 +-
 src/operator/numpy/np_dot-inl.h|   16 +-
 src/operator/numpy/np_dot.cc   |4 +-
 src/operator/numpy/np_einsum_op-inl.h  | 1092 
 src/operator/numpy/np_einsum_op.cc |  370 +++
 src/operator/numpy/np_einsum_op.cu |   36 +
 src/operator/numpy/np_einsum_path_op-inl.h |  964 +
 src/operator/numpy/np_elemwise_broadcast_op.cc |2 +
 src/operator/numpy/np_tensordot_op-inl.h   |  120 ++-
 src/operator/numpy/np_tensordot_op.cc  |8 +-
 src/operator/pad.cc|  326 +++---
 .../mkldnn/mkldnn_quantized_fully_connected.cc |9 +-
 src/operator/subgraph/mkldnn/mkldnn_fc.cc  |   12 +-
 src/operator/swapaxis-inl.h|   12 +-
 .../tensor/elemwise_binary_broadcast_op-inl.cuh|   18 +-
 src/operator/tensor/elemwise_binary_broadcast_op.h |5 +-
 src/operator/tensor/elemwise_sum.h |8 +-
 tests/nightly/test_large_array.py  |  304 ++
 tests/nightly/test_large_vector.py |  222 
 tests/python/gpu/test_gluon_transforms.py  |   74

[incubator-mxnet] branch mkldnn-v1.0 updated (61132d0 -> fe1b1ea)

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 61132d0  remove MKL license (#16534)
 add bf57ff8  added more tests to verify support for large vector (#16477)
 add de524bb  Fixing broken links (#16500)
 add a4ea4a8  Load NDArray only to GPU if GPU is present (#16432)
 add d1200c9  add binary and docs build command options (#16514)
 add e4f8c50  [MKLDNN] Fix uint quantized fc when not fusing with 
requantize (#16523)
 add f6cfbdf  improve unary and binary operator handling and refactor tests 
(#16423)
 add 77e9898  Bug fix for the input of same axes of the swapaxes operator 
(#16513)
 add f2ed1d4  added support for large tensors for Dropout operator and 
tests to verify support for more operators (#16409)
 add 63fbfb1  [DOC] Fix numpy op doc  (#16504)
 add f01bcaa  [Numpy] More numpy dispatch tests (#16426)
 add 27f7082  Fix learning rate scheduler being unexpectedly overwritten by 
optimizer's default value (#16487)
 add 73bff7d  adding large tensor support for add_n and tests for more ops 
(#16476)
 add 4b8a95f  add option to remove indexes (#16525)
 add 32bb374  disable tests (#16536)
 add efa5369  adding large tensor support for pad operator (#15126)
 add 2d4c3a4  fix pylint in CI (#16540)
 add 27b3e52  image crop gpu (#16464)
 add a75ec06  [Numpy] einsum (#15911)
 new fe1b1ea  Merge branch 'master' of 
https://github.com/apache/incubator-mxnet into mkldnn-v1.0

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .travis.yml|4 +-
 Makefile   |2 +-
 benchmark/python/einsum/benchmark_einsum.py|   78 ++
 ci/docker/runtime_functions.sh |4 +-
 dev_menu.py|   20 +-
 .../python/tutorials/deploy/export/onnx.md |4 +-
 .../python/tutorials/deploy/run-on-aws/cloud.md|   29 +
 .../python/tutorials/deploy/run-on-aws/cloud.rst   |  105 --
 .../python/tutorials/deploy/run-on-aws/index.rst   |2 +-
 .../gluon_from_experiment_to_deployment.md |4 +-
 .../tutorials/getting-started/to-mxnet/index.rst   |2 +-
 .../python/tutorials/packages/autograd/index.md|   10 +-
 .../gluon/blocks/custom_layer_beginners.md |   30 +-
 .../tutorials/packages/gluon/blocks/hybridize.md   |   12 +-
 .../python/tutorials/packages/gluon/blocks/init.md |   12 +-
 .../python/tutorials/packages/gluon/blocks/nn.md   |   24 +-
 .../tutorials/packages/gluon/blocks/parameters.md  |   20 +-
 .../packages/gluon/image/image-augmentation.md |   20 +-
 .../packages/gluon/image/pretrained_models.md  |   14 +-
 .../python/tutorials/packages/gluon/index.rst  |   12 +-
 .../tutorials/packages/gluon/loss/custom-loss.md   |   28 +-
 .../python/tutorials/packages/gluon/loss/loss.md   |   52 +-
 .../packages/gluon/training/fit_api_tutorial.md|   68 +-
 .../learning_rates/learning_rate_finder.md |   18 +-
 .../learning_rate_schedules_advanced.md|   22 +-
 .../tutorials/packages/gluon/training/trainer.md   |   24 +-
 .../python/tutorials/packages/kvstore/index.rst|7 +-
 .../python/tutorials/packages/kvstore/kvstore.md   |4 +-
 .../tutorials/packages/ndarray/01-ndarray-intro.md |   14 +-
 .../packages/ndarray/02-ndarray-operations.md  |   39 +-
 .../packages/ndarray/03-ndarray-contexts.md|4 +-
 .../packages/ndarray/gotchas_numpy_in_mxnet.md |   38 +-
 .../packages/ndarray/sparse/row_sparse.md  |   28 +-
 .../tutorials/packages/onnx/fine_tuning_gluon.md   |   12 +-
 .../packages/onnx/inference_on_onnx_model.md   |2 +-
 .../tutorials/packages/onnx/super_resolution.md|4 +-
 docs/static_site/src/.htaccess |1 +
 .../src/pages/api/faq/distributed_training.md  |4 +-
 .../docs/tutorials/five_minutes_neural_network.md  |2 +-
 python/mxnet/_numpy_op_doc.py  |   69 ++
 python/mxnet/ndarray/numpy/_op.py  |  599 +--
 python/mxnet/numpy/multiarray.py   |  582 +--
 python/mxnet/numpy/stride_tricks.py|2 +-
 python/mxnet/numpy_dispatch_protocol.py|4 +
 python/mxnet/optimizer/optimizer.py|   20 +-
 python/mxnet/symbol/numpy/_symbol.py   |  362 +--
 python/mxnet/util.py   |  106 ++
 src/ndarray/ndarray.cc |8 +-
 src/operator/contrib/bilinear_resize-inl.cuh   |8 +-
 src/operator/image/crop-inl.h  |  

[incubator-mxnet] branch mkldnn-v1.0 updated (61132d0 -> fe1b1ea)

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 61132d0  remove MKL license (#16534)
 add bf57ff8  added more tests to verify support for large vector (#16477)
 add de524bb  Fixing broken links (#16500)
 add a4ea4a8  Load NDArray only to GPU if GPU is present (#16432)
 add d1200c9  add binary and docs build command options (#16514)
 add e4f8c50  [MKLDNN] Fix uint quantized fc when not fusing with 
requantize (#16523)
 add f6cfbdf  improve unary and binary operator handling and refactor tests 
(#16423)
 add 77e9898  Bug fix for the input of same axes of the swapaxes operator 
(#16513)
 add f2ed1d4  added support for large tensors for Dropout operator and 
tests to verify support for more operators (#16409)
 add 63fbfb1  [DOC] Fix numpy op doc  (#16504)
 add f01bcaa  [Numpy] More numpy dispatch tests (#16426)
 add 27f7082  Fix learning rate scheduler being unexpectedly overwritten by 
optimizer's default value (#16487)
 add 73bff7d  adding large tensor support for add_n and tests for more ops 
(#16476)
 add 4b8a95f  add option to remove indexes (#16525)
 add 32bb374  disable tests (#16536)
 add efa5369  adding large tensor support for pad operator (#15126)
 add 2d4c3a4  fix pylint in CI (#16540)
 add 27b3e52  image crop gpu (#16464)
 add a75ec06  [Numpy] einsum (#15911)
 new fe1b1ea  Merge branch 'master' of 
https://github.com/apache/incubator-mxnet into mkldnn-v1.0

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .travis.yml|4 +-
 Makefile   |2 +-
 benchmark/python/einsum/benchmark_einsum.py|   78 ++
 ci/docker/runtime_functions.sh |4 +-
 dev_menu.py|   20 +-
 .../python/tutorials/deploy/export/onnx.md |4 +-
 .../python/tutorials/deploy/run-on-aws/cloud.md|   29 +
 .../python/tutorials/deploy/run-on-aws/cloud.rst   |  105 --
 .../python/tutorials/deploy/run-on-aws/index.rst   |2 +-
 .../gluon_from_experiment_to_deployment.md |4 +-
 .../tutorials/getting-started/to-mxnet/index.rst   |2 +-
 .../python/tutorials/packages/autograd/index.md|   10 +-
 .../gluon/blocks/custom_layer_beginners.md |   30 +-
 .../tutorials/packages/gluon/blocks/hybridize.md   |   12 +-
 .../python/tutorials/packages/gluon/blocks/init.md |   12 +-
 .../python/tutorials/packages/gluon/blocks/nn.md   |   24 +-
 .../tutorials/packages/gluon/blocks/parameters.md  |   20 +-
 .../packages/gluon/image/image-augmentation.md |   20 +-
 .../packages/gluon/image/pretrained_models.md  |   14 +-
 .../python/tutorials/packages/gluon/index.rst  |   12 +-
 .../tutorials/packages/gluon/loss/custom-loss.md   |   28 +-
 .../python/tutorials/packages/gluon/loss/loss.md   |   52 +-
 .../packages/gluon/training/fit_api_tutorial.md|   68 +-
 .../learning_rates/learning_rate_finder.md |   18 +-
 .../learning_rate_schedules_advanced.md|   22 +-
 .../tutorials/packages/gluon/training/trainer.md   |   24 +-
 .../python/tutorials/packages/kvstore/index.rst|7 +-
 .../python/tutorials/packages/kvstore/kvstore.md   |4 +-
 .../tutorials/packages/ndarray/01-ndarray-intro.md |   14 +-
 .../packages/ndarray/02-ndarray-operations.md  |   39 +-
 .../packages/ndarray/03-ndarray-contexts.md|4 +-
 .../packages/ndarray/gotchas_numpy_in_mxnet.md |   38 +-
 .../packages/ndarray/sparse/row_sparse.md  |   28 +-
 .../tutorials/packages/onnx/fine_tuning_gluon.md   |   12 +-
 .../packages/onnx/inference_on_onnx_model.md   |2 +-
 .../tutorials/packages/onnx/super_resolution.md|4 +-
 docs/static_site/src/.htaccess |1 +
 .../src/pages/api/faq/distributed_training.md  |4 +-
 .../docs/tutorials/five_minutes_neural_network.md  |2 +-
 python/mxnet/_numpy_op_doc.py  |   69 ++
 python/mxnet/ndarray/numpy/_op.py  |  599 +--
 python/mxnet/numpy/multiarray.py   |  582 +--
 python/mxnet/numpy/stride_tricks.py|2 +-
 python/mxnet/numpy_dispatch_protocol.py|4 +
 python/mxnet/optimizer/optimizer.py|   20 +-
 python/mxnet/symbol/numpy/_symbol.py   |  362 +--
 python/mxnet/util.py   |  106 ++
 src/ndarray/ndarray.cc |8 +-
 src/operator/contrib/bilinear_resize-inl.cuh   |8 +-
 src/operator/image/crop-inl.h  |  

[incubator-mxnet] 01/01: Merge branch 'master' of https://github.com/apache/incubator-mxnet into mkldnn-v1.0

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git

commit fe1b1ea274610bf43f46ddc950f7724c1b8731f0
Merge: 61132d0 a75ec06
Author: Tao Lv 
AuthorDate: Sun Oct 20 07:24:55 2019 +0800

Merge branch 'master' of https://github.com/apache/incubator-mxnet into 
mkldnn-v1.0

 .travis.yml|4 +-
 Makefile   |2 +-
 benchmark/python/einsum/benchmark_einsum.py|   78 ++
 ci/docker/runtime_functions.sh |4 +-
 dev_menu.py|   20 +-
 .../python/tutorials/deploy/export/onnx.md |4 +-
 .../python/tutorials/deploy/run-on-aws/cloud.md|   29 +
 .../python/tutorials/deploy/run-on-aws/cloud.rst   |  105 --
 .../python/tutorials/deploy/run-on-aws/index.rst   |2 +-
 .../gluon_from_experiment_to_deployment.md |4 +-
 .../tutorials/getting-started/to-mxnet/index.rst   |2 +-
 .../python/tutorials/packages/autograd/index.md|   10 +-
 .../gluon/blocks/custom_layer_beginners.md |   30 +-
 .../tutorials/packages/gluon/blocks/hybridize.md   |   12 +-
 .../python/tutorials/packages/gluon/blocks/init.md |   12 +-
 .../python/tutorials/packages/gluon/blocks/nn.md   |   24 +-
 .../tutorials/packages/gluon/blocks/parameters.md  |   20 +-
 .../packages/gluon/image/image-augmentation.md |   20 +-
 .../packages/gluon/image/pretrained_models.md  |   14 +-
 .../python/tutorials/packages/gluon/index.rst  |   12 +-
 .../tutorials/packages/gluon/loss/custom-loss.md   |   28 +-
 .../python/tutorials/packages/gluon/loss/loss.md   |   52 +-
 .../packages/gluon/training/fit_api_tutorial.md|   68 +-
 .../learning_rates/learning_rate_finder.md |   18 +-
 .../learning_rate_schedules_advanced.md|   22 +-
 .../tutorials/packages/gluon/training/trainer.md   |   24 +-
 .../python/tutorials/packages/kvstore/index.rst|7 +-
 .../python/tutorials/packages/kvstore/kvstore.md   |4 +-
 .../tutorials/packages/ndarray/01-ndarray-intro.md |   14 +-
 .../packages/ndarray/02-ndarray-operations.md  |   39 +-
 .../packages/ndarray/03-ndarray-contexts.md|4 +-
 .../packages/ndarray/gotchas_numpy_in_mxnet.md |   38 +-
 .../packages/ndarray/sparse/row_sparse.md  |   28 +-
 .../tutorials/packages/onnx/fine_tuning_gluon.md   |   12 +-
 .../packages/onnx/inference_on_onnx_model.md   |2 +-
 .../tutorials/packages/onnx/super_resolution.md|4 +-
 docs/static_site/src/.htaccess |1 +
 .../src/pages/api/faq/distributed_training.md  |4 +-
 .../docs/tutorials/five_minutes_neural_network.md  |2 +-
 python/mxnet/_numpy_op_doc.py  |   69 ++
 python/mxnet/ndarray/numpy/_op.py  |  599 +--
 python/mxnet/numpy/multiarray.py   |  582 +--
 python/mxnet/numpy/stride_tricks.py|2 +-
 python/mxnet/numpy_dispatch_protocol.py|4 +
 python/mxnet/optimizer/optimizer.py|   20 +-
 python/mxnet/symbol/numpy/_symbol.py   |  362 +--
 python/mxnet/util.py   |  106 ++
 src/ndarray/ndarray.cc |8 +-
 src/operator/contrib/bilinear_resize-inl.cuh   |8 +-
 src/operator/image/crop-inl.h  |   22 +-
 src/operator/image/crop.cc |4 +-
 src/operator/image/crop.cu |   34 +
 src/operator/mxnet_op.h|   36 +
 src/operator/nn/dropout-inl.h  |   12 +-
 src/operator/numpy/np_dot-inl.h|   16 +-
 src/operator/numpy/np_dot.cc   |4 +-
 src/operator/numpy/np_einsum_op-inl.h  | 1092 
 src/operator/numpy/np_einsum_op.cc |  370 +++
 src/operator/numpy/np_einsum_op.cu |   36 +
 src/operator/numpy/np_einsum_path_op-inl.h |  964 +
 src/operator/numpy/np_elemwise_broadcast_op.cc |2 +
 src/operator/numpy/np_tensordot_op-inl.h   |  120 ++-
 src/operator/numpy/np_tensordot_op.cc  |8 +-
 src/operator/pad.cc|  326 +++---
 .../mkldnn/mkldnn_quantized_fully_connected.cc |9 +-
 src/operator/subgraph/mkldnn/mkldnn_fc.cc  |   12 +-
 src/operator/swapaxis-inl.h|   12 +-
 .../tensor/elemwise_binary_broadcast_op-inl.cuh|   18 +-
 src/operator/tensor/elemwise_binary_broadcast_op.h |5 +-
 src/operator/tensor/elemwise_sum.h |8 +-
 tests/nightly/test_large_array.py  |  304 ++
 tests/nightly/test_large_vector.py |  222 
 tests/python/gpu/test_gluon_transforms.py  |   74

[incubator-mxnet] 01/01: Merge branch 'master' of https://github.com/apache/incubator-mxnet into mkldnn-v1.0

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git

commit fe1b1ea274610bf43f46ddc950f7724c1b8731f0
Merge: 61132d0 a75ec06
Author: Tao Lv 
AuthorDate: Sun Oct 20 07:24:55 2019 +0800

Merge branch 'master' of https://github.com/apache/incubator-mxnet into 
mkldnn-v1.0

 .travis.yml|4 +-
 Makefile   |2 +-
 benchmark/python/einsum/benchmark_einsum.py|   78 ++
 ci/docker/runtime_functions.sh |4 +-
 dev_menu.py|   20 +-
 .../python/tutorials/deploy/export/onnx.md |4 +-
 .../python/tutorials/deploy/run-on-aws/cloud.md|   29 +
 .../python/tutorials/deploy/run-on-aws/cloud.rst   |  105 --
 .../python/tutorials/deploy/run-on-aws/index.rst   |2 +-
 .../gluon_from_experiment_to_deployment.md |4 +-
 .../tutorials/getting-started/to-mxnet/index.rst   |2 +-
 .../python/tutorials/packages/autograd/index.md|   10 +-
 .../gluon/blocks/custom_layer_beginners.md |   30 +-
 .../tutorials/packages/gluon/blocks/hybridize.md   |   12 +-
 .../python/tutorials/packages/gluon/blocks/init.md |   12 +-
 .../python/tutorials/packages/gluon/blocks/nn.md   |   24 +-
 .../tutorials/packages/gluon/blocks/parameters.md  |   20 +-
 .../packages/gluon/image/image-augmentation.md |   20 +-
 .../packages/gluon/image/pretrained_models.md  |   14 +-
 .../python/tutorials/packages/gluon/index.rst  |   12 +-
 .../tutorials/packages/gluon/loss/custom-loss.md   |   28 +-
 .../python/tutorials/packages/gluon/loss/loss.md   |   52 +-
 .../packages/gluon/training/fit_api_tutorial.md|   68 +-
 .../learning_rates/learning_rate_finder.md |   18 +-
 .../learning_rate_schedules_advanced.md|   22 +-
 .../tutorials/packages/gluon/training/trainer.md   |   24 +-
 .../python/tutorials/packages/kvstore/index.rst|7 +-
 .../python/tutorials/packages/kvstore/kvstore.md   |4 +-
 .../tutorials/packages/ndarray/01-ndarray-intro.md |   14 +-
 .../packages/ndarray/02-ndarray-operations.md  |   39 +-
 .../packages/ndarray/03-ndarray-contexts.md|4 +-
 .../packages/ndarray/gotchas_numpy_in_mxnet.md |   38 +-
 .../packages/ndarray/sparse/row_sparse.md  |   28 +-
 .../tutorials/packages/onnx/fine_tuning_gluon.md   |   12 +-
 .../packages/onnx/inference_on_onnx_model.md   |2 +-
 .../tutorials/packages/onnx/super_resolution.md|4 +-
 docs/static_site/src/.htaccess |1 +
 .../src/pages/api/faq/distributed_training.md  |4 +-
 .../docs/tutorials/five_minutes_neural_network.md  |2 +-
 python/mxnet/_numpy_op_doc.py  |   69 ++
 python/mxnet/ndarray/numpy/_op.py  |  599 +--
 python/mxnet/numpy/multiarray.py   |  582 +--
 python/mxnet/numpy/stride_tricks.py|2 +-
 python/mxnet/numpy_dispatch_protocol.py|4 +
 python/mxnet/optimizer/optimizer.py|   20 +-
 python/mxnet/symbol/numpy/_symbol.py   |  362 +--
 python/mxnet/util.py   |  106 ++
 src/ndarray/ndarray.cc |8 +-
 src/operator/contrib/bilinear_resize-inl.cuh   |8 +-
 src/operator/image/crop-inl.h  |   22 +-
 src/operator/image/crop.cc |4 +-
 src/operator/image/crop.cu |   34 +
 src/operator/mxnet_op.h|   36 +
 src/operator/nn/dropout-inl.h  |   12 +-
 src/operator/numpy/np_dot-inl.h|   16 +-
 src/operator/numpy/np_dot.cc   |4 +-
 src/operator/numpy/np_einsum_op-inl.h  | 1092 
 src/operator/numpy/np_einsum_op.cc |  370 +++
 src/operator/numpy/np_einsum_op.cu |   36 +
 src/operator/numpy/np_einsum_path_op-inl.h |  964 +
 src/operator/numpy/np_elemwise_broadcast_op.cc |2 +
 src/operator/numpy/np_tensordot_op-inl.h   |  120 ++-
 src/operator/numpy/np_tensordot_op.cc  |8 +-
 src/operator/pad.cc|  326 +++---
 .../mkldnn/mkldnn_quantized_fully_connected.cc |9 +-
 src/operator/subgraph/mkldnn/mkldnn_fc.cc  |   12 +-
 src/operator/swapaxis-inl.h|   12 +-
 .../tensor/elemwise_binary_broadcast_op-inl.cuh|   18 +-
 src/operator/tensor/elemwise_binary_broadcast_op.h |5 +-
 src/operator/tensor/elemwise_sum.h |8 +-
 tests/nightly/test_large_array.py  |  304 ++
 tests/nightly/test_large_vector.py |  222 
 tests/python/gpu/test_gluon_transforms.py  |   74

[incubator-mxnet] branch mkldnn-v1.0 updated (61132d0 -> fe1b1ea)

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 61132d0  remove MKL license (#16534)
 add bf57ff8  added more tests to verify support for large vector (#16477)
 add de524bb  Fixing broken links (#16500)
 add a4ea4a8  Load NDArray only to GPU if GPU is present (#16432)
 add d1200c9  add binary and docs build command options (#16514)
 add e4f8c50  [MKLDNN] Fix uint quantized fc when not fusing with 
requantize (#16523)
 add f6cfbdf  improve unary and binary operator handling and refactor tests 
(#16423)
 add 77e9898  Bug fix for the input of same axes of the swapaxes operator 
(#16513)
 add f2ed1d4  added support for large tensors for Dropout operator and 
tests to verify support for more operators (#16409)
 add 63fbfb1  [DOC] Fix numpy op doc  (#16504)
 add f01bcaa  [Numpy] More numpy dispatch tests (#16426)
 add 27f7082  Fix learning rate scheduler being unexpectedly overwritten by 
optimizer's default value (#16487)
 add 73bff7d  adding large tensor support for add_n and tests for more ops 
(#16476)
 add 4b8a95f  add option to remove indexes (#16525)
 add 32bb374  disable tests (#16536)
 add efa5369  adding large tensor support for pad operator (#15126)
 add 2d4c3a4  fix pylint in CI (#16540)
 add 27b3e52  image crop gpu (#16464)
 add a75ec06  [Numpy] einsum (#15911)
 new fe1b1ea  Merge branch 'master' of 
https://github.com/apache/incubator-mxnet into mkldnn-v1.0

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .travis.yml|4 +-
 Makefile   |2 +-
 benchmark/python/einsum/benchmark_einsum.py|   78 ++
 ci/docker/runtime_functions.sh |4 +-
 dev_menu.py|   20 +-
 .../python/tutorials/deploy/export/onnx.md |4 +-
 .../python/tutorials/deploy/run-on-aws/cloud.md|   29 +
 .../python/tutorials/deploy/run-on-aws/cloud.rst   |  105 --
 .../python/tutorials/deploy/run-on-aws/index.rst   |2 +-
 .../gluon_from_experiment_to_deployment.md |4 +-
 .../tutorials/getting-started/to-mxnet/index.rst   |2 +-
 .../python/tutorials/packages/autograd/index.md|   10 +-
 .../gluon/blocks/custom_layer_beginners.md |   30 +-
 .../tutorials/packages/gluon/blocks/hybridize.md   |   12 +-
 .../python/tutorials/packages/gluon/blocks/init.md |   12 +-
 .../python/tutorials/packages/gluon/blocks/nn.md   |   24 +-
 .../tutorials/packages/gluon/blocks/parameters.md  |   20 +-
 .../packages/gluon/image/image-augmentation.md |   20 +-
 .../packages/gluon/image/pretrained_models.md  |   14 +-
 .../python/tutorials/packages/gluon/index.rst  |   12 +-
 .../tutorials/packages/gluon/loss/custom-loss.md   |   28 +-
 .../python/tutorials/packages/gluon/loss/loss.md   |   52 +-
 .../packages/gluon/training/fit_api_tutorial.md|   68 +-
 .../learning_rates/learning_rate_finder.md |   18 +-
 .../learning_rate_schedules_advanced.md|   22 +-
 .../tutorials/packages/gluon/training/trainer.md   |   24 +-
 .../python/tutorials/packages/kvstore/index.rst|7 +-
 .../python/tutorials/packages/kvstore/kvstore.md   |4 +-
 .../tutorials/packages/ndarray/01-ndarray-intro.md |   14 +-
 .../packages/ndarray/02-ndarray-operations.md  |   39 +-
 .../packages/ndarray/03-ndarray-contexts.md|4 +-
 .../packages/ndarray/gotchas_numpy_in_mxnet.md |   38 +-
 .../packages/ndarray/sparse/row_sparse.md  |   28 +-
 .../tutorials/packages/onnx/fine_tuning_gluon.md   |   12 +-
 .../packages/onnx/inference_on_onnx_model.md   |2 +-
 .../tutorials/packages/onnx/super_resolution.md|4 +-
 docs/static_site/src/.htaccess |1 +
 .../src/pages/api/faq/distributed_training.md  |4 +-
 .../docs/tutorials/five_minutes_neural_network.md  |2 +-
 python/mxnet/_numpy_op_doc.py  |   69 ++
 python/mxnet/ndarray/numpy/_op.py  |  599 +--
 python/mxnet/numpy/multiarray.py   |  582 +--
 python/mxnet/numpy/stride_tricks.py|2 +-
 python/mxnet/numpy_dispatch_protocol.py|4 +
 python/mxnet/optimizer/optimizer.py|   20 +-
 python/mxnet/symbol/numpy/_symbol.py   |  362 +--
 python/mxnet/util.py   |  106 ++
 src/ndarray/ndarray.cc |8 +-
 src/operator/contrib/bilinear_resize-inl.cuh   |8 +-
 src/operator/image/crop-inl.h  |   22 +-

[incubator-mxnet] branch mkldnn-v1.0 updated (6eadab3 -> 61132d0)

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 6eadab3  [mkldnn-v1.0]Minor fix for leakyrelu compile flag (#16519)
 add 61132d0  remove MKL license (#16534)

No new revisions were added by this update.

Summary of changes:
 LICENSE | 51 ---
 1 file changed, 8 insertions(+), 43 deletions(-)



[incubator-mxnet] branch mkldnn-v1.0 updated (6eadab3 -> 61132d0)

2019-10-19 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 6eadab3  [mkldnn-v1.0]Minor fix for leakyrelu compile flag (#16519)
 add 61132d0  remove MKL license (#16534)

No new revisions were added by this update.

Summary of changes:
 LICENSE | 51 ---
 1 file changed, 8 insertions(+), 43 deletions(-)



[incubator-mxnet] branch mkldnn-v1.0 updated (b5cdabe -> ba3229b)

2019-10-17 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from b5cdabe  [mkldnn-v1.0] Enable mkldnn cpp-test, copy op, concat op 
(#16503)
 add 1d4ede3  Add mask target generator operator for Mask-RCNN (#16268)
 add 8820220  Adds pip requirements file to nightly gpu ci image (#16472)
 add 1256976  Fix Nightly Tests for Binaries (#16451)
 add 812e504  fix autodoc for spurrious toggles (#16452)
 add 7ce  Fix dtype bug (#16467)
 add 9ab428e  [Doc] Update the download page with 1.5.1 release (#16442)
 add 6e0b1a5  [Numpy] Numpy compatible dstack (#15871)
 add ceebcaf  numpy eye op (#16132)
 add 8222979  Numpy compatible vsplit; minor changes to split (#15983)
 add 8562adc  add numpy op logspace (#15825)
 add 9681197  add numpy op bitwise_xor, hsplit, moveaxis, rot90 (#16257)
 add f9359c3  Fix flakey pylint CI failures (#16462)
 add 67e1e68  Aggregated zero grad (#16446)
 add b1932c0  Move MRCNNMaskTarget op to contrib (#16486)
 add 06438ab  Mxnet allclose (#14443)
 add 0c00a79  Fix optimizer bug for np attribute (#16494)
 add c2bbde7  Tests of NumPy interoperability (#16469)
 add ba3229b  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 ci/docker/Dockerfile.build.ubuntu_nightly_gpu  |   1 +
 ci/other/pylintrc  |   7 +-
 docs/python_docs/_static/autodoc.js|  34 +-
 .../src/_includes/get_started/get_started.html |   6 +-
 .../src/_includes/get_started/pip_snippet.md   |   2 +-
 docs/static_site/src/pages/get_started/download.md |   1 +
 python/mxnet/_numpy_op_doc.py  |  44 ++
 python/mxnet/gluon/parameter.py|  21 +-
 python/mxnet/ndarray/numpy/_op.py  | 416 -
 python/mxnet/numpy/multiarray.py   | 392 +++-
 python/mxnet/numpy_dispatch_protocol.py|   2 +
 python/mxnet/numpy_op_signature.py |   5 +-
 python/mxnet/optimizer/optimizer.py|   2 +-
 python/mxnet/symbol/numpy/_symbol.py   | 362 ++-
 python/mxnet/test_utils.py | 188 --
 src/operator/contrib/allclose_op-inl.h | 160 +
 src/operator/contrib/allclose_op.cc|  86 +++
 src/operator/contrib/allclose_op.cu|  58 ++
 src/operator/contrib/mrcnn_mask_target-inl.h   | 132 
 src/operator/contrib/mrcnn_mask_target.cu  | 278 +
 src/operator/contrib/reset_arrays-inl.h|  92 +++
 src/operator/contrib/reset_arrays.cc   |  74 +++
 .../contrib/{multi_lars.cu => reset_arrays.cu} |  18 +-
 src/operator/mshadow_op.h  |   2 +
 src/operator/nn/concat-inl.h   |  62 ++
 src/operator/numpy/np_elemwise_broadcast_op.cu |   4 +
 src/operator/numpy/np_init_op.cc   |  53 +-
 src/operator/numpy/np_init_op.cu   |   6 +
 src/operator/numpy/np_init_op.h| 132 
 src/operator/numpy/np_matrix_op-inl.h  | 367 
 src/operator/numpy/np_matrix_op.cc | 268 -
 src/operator/numpy/np_matrix_op.cu |  19 +
 src/operator/operator_tune.cc  |   1 +
 src/operator/tensor/init_op.h  |  40 +-
 src/operator/tensor/matrix_op-inl.h| 115 ++--
 tests/python-pytest/onnx/mxnet_export_test.py  |   2 +-
 tests/python/gpu/test_gluon_gpu.py | 136 -
 tests/python/gpu/test_gluon_model_zoo_gpu.py   |  14 +-
 tests/python/gpu/test_operator_gpu.py  |  59 +-
 tests/python/mkl/test_mkldnn.py|  12 +-
 tests/python/unittest/test_contrib_operator.py |  58 ++
 tests/python/unittest/test_gluon.py|  66 +-
 tests/python/unittest/test_gluon_contrib.py|  21 +-
 tests/python/unittest/test_loss.py |  31 +-
 tests/python/unittest/test_ndarray.py  |  27 +-
 tests/python/unittest/test_numpy_gluon.py  |   9 +
 .../python/unittest/test_numpy_interoperability.py | 160 -
 tests/python/unittest/test_numpy_ndarray.py|  23 +
 tests/python/unittest/test_numpy_op.py | 382 
 tests/python/unittest/test_operator.py | 663 +
 tests/python/unittest/test_random.py   |  16 +-
 tests/python/unittest/test_sparse_operator.py  |   4 +-
 tests/python/unittest/test_subgraph.py |   7 -
 tests/utils/notebook_test/__init__.py  |   2 +-
 54 files changed, 4439 insertions(+), 703 deletions(-)
 create mode 100644 src/operator/contrib/allclose_op-inl.h
 create mode 100644 src/operator/contrib/allclose_op

[incubator-mxnet] branch mkldnn-v1.0 updated (b5cdabe -> ba3229b)

2019-10-17 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from b5cdabe  [mkldnn-v1.0] Enable mkldnn cpp-test, copy op, concat op 
(#16503)
 add 1d4ede3  Add mask target generator operator for Mask-RCNN (#16268)
 add 8820220  Adds pip requirements file to nightly gpu ci image (#16472)
 add 1256976  Fix Nightly Tests for Binaries (#16451)
 add 812e504  fix autodoc for spurrious toggles (#16452)
 add 7ce  Fix dtype bug (#16467)
 add 9ab428e  [Doc] Update the download page with 1.5.1 release (#16442)
 add 6e0b1a5  [Numpy] Numpy compatible dstack (#15871)
 add ceebcaf  numpy eye op (#16132)
 add 8222979  Numpy compatible vsplit; minor changes to split (#15983)
 add 8562adc  add numpy op logspace (#15825)
 add 9681197  add numpy op bitwise_xor, hsplit, moveaxis, rot90 (#16257)
 add f9359c3  Fix flakey pylint CI failures (#16462)
 add 67e1e68  Aggregated zero grad (#16446)
 add b1932c0  Move MRCNNMaskTarget op to contrib (#16486)
 add 06438ab  Mxnet allclose (#14443)
 add 0c00a79  Fix optimizer bug for np attribute (#16494)
 add c2bbde7  Tests of NumPy interoperability (#16469)
 add ba3229b  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 ci/docker/Dockerfile.build.ubuntu_nightly_gpu  |   1 +
 ci/other/pylintrc  |   7 +-
 docs/python_docs/_static/autodoc.js|  34 +-
 .../src/_includes/get_started/get_started.html |   6 +-
 .../src/_includes/get_started/pip_snippet.md   |   2 +-
 docs/static_site/src/pages/get_started/download.md |   1 +
 python/mxnet/_numpy_op_doc.py  |  44 ++
 python/mxnet/gluon/parameter.py|  21 +-
 python/mxnet/ndarray/numpy/_op.py  | 416 -
 python/mxnet/numpy/multiarray.py   | 392 +++-
 python/mxnet/numpy_dispatch_protocol.py|   2 +
 python/mxnet/numpy_op_signature.py |   5 +-
 python/mxnet/optimizer/optimizer.py|   2 +-
 python/mxnet/symbol/numpy/_symbol.py   | 362 ++-
 python/mxnet/test_utils.py | 188 --
 src/operator/contrib/allclose_op-inl.h | 160 +
 src/operator/contrib/allclose_op.cc|  86 +++
 src/operator/contrib/allclose_op.cu|  58 ++
 src/operator/contrib/mrcnn_mask_target-inl.h   | 132 
 src/operator/contrib/mrcnn_mask_target.cu  | 278 +
 src/operator/contrib/reset_arrays-inl.h|  92 +++
 src/operator/contrib/reset_arrays.cc   |  74 +++
 .../contrib/{multi_lars.cu => reset_arrays.cu} |  18 +-
 src/operator/mshadow_op.h  |   2 +
 src/operator/nn/concat-inl.h   |  62 ++
 src/operator/numpy/np_elemwise_broadcast_op.cu |   4 +
 src/operator/numpy/np_init_op.cc   |  53 +-
 src/operator/numpy/np_init_op.cu   |   6 +
 src/operator/numpy/np_init_op.h| 132 
 src/operator/numpy/np_matrix_op-inl.h  | 367 
 src/operator/numpy/np_matrix_op.cc | 268 -
 src/operator/numpy/np_matrix_op.cu |  19 +
 src/operator/operator_tune.cc  |   1 +
 src/operator/tensor/init_op.h  |  40 +-
 src/operator/tensor/matrix_op-inl.h| 115 ++--
 tests/python-pytest/onnx/mxnet_export_test.py  |   2 +-
 tests/python/gpu/test_gluon_gpu.py | 136 -
 tests/python/gpu/test_gluon_model_zoo_gpu.py   |  14 +-
 tests/python/gpu/test_operator_gpu.py  |  59 +-
 tests/python/mkl/test_mkldnn.py|  12 +-
 tests/python/unittest/test_contrib_operator.py |  58 ++
 tests/python/unittest/test_gluon.py|  66 +-
 tests/python/unittest/test_gluon_contrib.py|  21 +-
 tests/python/unittest/test_loss.py |  31 +-
 tests/python/unittest/test_ndarray.py  |  27 +-
 tests/python/unittest/test_numpy_gluon.py  |   9 +
 .../python/unittest/test_numpy_interoperability.py | 160 -
 tests/python/unittest/test_numpy_ndarray.py|  23 +
 tests/python/unittest/test_numpy_op.py | 382 
 tests/python/unittest/test_operator.py | 663 +
 tests/python/unittest/test_random.py   |  16 +-
 tests/python/unittest/test_sparse_operator.py  |   4 +-
 tests/python/unittest/test_subgraph.py |   7 -
 tests/utils/notebook_test/__init__.py  |   2 +-
 54 files changed, 4439 insertions(+), 703 deletions(-)
 create mode 100644 src/operator/contrib/allclose_op-inl.h
 create mode 100644 src/operator/contrib/allclose_op

[incubator-mxnet] branch mkldnn-v1.0 updated (b5cdabe -> ba3229b)

2019-10-17 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from b5cdabe  [mkldnn-v1.0] Enable mkldnn cpp-test, copy op, concat op 
(#16503)
 add 1d4ede3  Add mask target generator operator for Mask-RCNN (#16268)
 add 8820220  Adds pip requirements file to nightly gpu ci image (#16472)
 add 1256976  Fix Nightly Tests for Binaries (#16451)
 add 812e504  fix autodoc for spurrious toggles (#16452)
 add 7ce  Fix dtype bug (#16467)
 add 9ab428e  [Doc] Update the download page with 1.5.1 release (#16442)
 add 6e0b1a5  [Numpy] Numpy compatible dstack (#15871)
 add ceebcaf  numpy eye op (#16132)
 add 8222979  Numpy compatible vsplit; minor changes to split (#15983)
 add 8562adc  add numpy op logspace (#15825)
 add 9681197  add numpy op bitwise_xor, hsplit, moveaxis, rot90 (#16257)
 add f9359c3  Fix flakey pylint CI failures (#16462)
 add 67e1e68  Aggregated zero grad (#16446)
 add b1932c0  Move MRCNNMaskTarget op to contrib (#16486)
 add 06438ab  Mxnet allclose (#14443)
 add 0c00a79  Fix optimizer bug for np attribute (#16494)
 add c2bbde7  Tests of NumPy interoperability (#16469)
 add ba3229b  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 ci/docker/Dockerfile.build.ubuntu_nightly_gpu  |   1 +
 ci/other/pylintrc  |   7 +-
 docs/python_docs/_static/autodoc.js|  34 +-
 .../src/_includes/get_started/get_started.html |   6 +-
 .../src/_includes/get_started/pip_snippet.md   |   2 +-
 docs/static_site/src/pages/get_started/download.md |   1 +
 python/mxnet/_numpy_op_doc.py  |  44 ++
 python/mxnet/gluon/parameter.py|  21 +-
 python/mxnet/ndarray/numpy/_op.py  | 416 -
 python/mxnet/numpy/multiarray.py   | 392 +++-
 python/mxnet/numpy_dispatch_protocol.py|   2 +
 python/mxnet/numpy_op_signature.py |   5 +-
 python/mxnet/optimizer/optimizer.py|   2 +-
 python/mxnet/symbol/numpy/_symbol.py   | 362 ++-
 python/mxnet/test_utils.py | 188 --
 src/operator/contrib/allclose_op-inl.h | 160 +
 src/operator/contrib/allclose_op.cc|  86 +++
 src/operator/contrib/allclose_op.cu|  58 ++
 src/operator/contrib/mrcnn_mask_target-inl.h   | 132 
 src/operator/contrib/mrcnn_mask_target.cu  | 278 +
 src/operator/contrib/reset_arrays-inl.h|  92 +++
 src/operator/contrib/reset_arrays.cc   |  74 +++
 .../contrib/{multi_lars.cu => reset_arrays.cu} |  18 +-
 src/operator/mshadow_op.h  |   2 +
 src/operator/nn/concat-inl.h   |  62 ++
 src/operator/numpy/np_elemwise_broadcast_op.cu |   4 +
 src/operator/numpy/np_init_op.cc   |  53 +-
 src/operator/numpy/np_init_op.cu   |   6 +
 src/operator/numpy/np_init_op.h| 132 
 src/operator/numpy/np_matrix_op-inl.h  | 367 
 src/operator/numpy/np_matrix_op.cc | 268 -
 src/operator/numpy/np_matrix_op.cu |  19 +
 src/operator/operator_tune.cc  |   1 +
 src/operator/tensor/init_op.h  |  40 +-
 src/operator/tensor/matrix_op-inl.h| 115 ++--
 tests/python-pytest/onnx/mxnet_export_test.py  |   2 +-
 tests/python/gpu/test_gluon_gpu.py | 136 -
 tests/python/gpu/test_gluon_model_zoo_gpu.py   |  14 +-
 tests/python/gpu/test_operator_gpu.py  |  59 +-
 tests/python/mkl/test_mkldnn.py|  12 +-
 tests/python/unittest/test_contrib_operator.py |  58 ++
 tests/python/unittest/test_gluon.py|  66 +-
 tests/python/unittest/test_gluon_contrib.py|  21 +-
 tests/python/unittest/test_loss.py |  31 +-
 tests/python/unittest/test_ndarray.py  |  27 +-
 tests/python/unittest/test_numpy_gluon.py  |   9 +
 .../python/unittest/test_numpy_interoperability.py | 160 -
 tests/python/unittest/test_numpy_ndarray.py|  23 +
 tests/python/unittest/test_numpy_op.py | 382 
 tests/python/unittest/test_operator.py | 663 +
 tests/python/unittest/test_random.py   |  16 +-
 tests/python/unittest/test_sparse_operator.py  |   4 +-
 tests/python/unittest/test_subgraph.py |   7 -
 tests/utils/notebook_test/__init__.py  |   2 +-
 54 files changed, 4439 insertions(+), 703 deletions(-)
 create mode 100644 src/operator/contrib/allclose_op-inl.h
 create mode 100644 src/operator/contrib/allclose_op

[incubator-mxnet] branch mkldnn-v1.0 updated (b5cdabe -> ba3229b)

2019-10-17 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from b5cdabe  [mkldnn-v1.0] Enable mkldnn cpp-test, copy op, concat op 
(#16503)
 add 1d4ede3  Add mask target generator operator for Mask-RCNN (#16268)
 add 8820220  Adds pip requirements file to nightly gpu ci image (#16472)
 add 1256976  Fix Nightly Tests for Binaries (#16451)
 add 812e504  fix autodoc for spurrious toggles (#16452)
 add 7ce  Fix dtype bug (#16467)
 add 9ab428e  [Doc] Update the download page with 1.5.1 release (#16442)
 add 6e0b1a5  [Numpy] Numpy compatible dstack (#15871)
 add ceebcaf  numpy eye op (#16132)
 add 8222979  Numpy compatible vsplit; minor changes to split (#15983)
 add 8562adc  add numpy op logspace (#15825)
 add 9681197  add numpy op bitwise_xor, hsplit, moveaxis, rot90 (#16257)
 add f9359c3  Fix flakey pylint CI failures (#16462)
 add 67e1e68  Aggregated zero grad (#16446)
 add b1932c0  Move MRCNNMaskTarget op to contrib (#16486)
 add 06438ab  Mxnet allclose (#14443)
 add 0c00a79  Fix optimizer bug for np attribute (#16494)
 add c2bbde7  Tests of NumPy interoperability (#16469)
 add ba3229b  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 ci/docker/Dockerfile.build.ubuntu_nightly_gpu  |   1 +
 ci/other/pylintrc  |   7 +-
 docs/python_docs/_static/autodoc.js|  34 +-
 .../src/_includes/get_started/get_started.html |   6 +-
 .../src/_includes/get_started/pip_snippet.md   |   2 +-
 docs/static_site/src/pages/get_started/download.md |   1 +
 python/mxnet/_numpy_op_doc.py  |  44 ++
 python/mxnet/gluon/parameter.py|  21 +-
 python/mxnet/ndarray/numpy/_op.py  | 416 -
 python/mxnet/numpy/multiarray.py   | 392 +++-
 python/mxnet/numpy_dispatch_protocol.py|   2 +
 python/mxnet/numpy_op_signature.py |   5 +-
 python/mxnet/optimizer/optimizer.py|   2 +-
 python/mxnet/symbol/numpy/_symbol.py   | 362 ++-
 python/mxnet/test_utils.py | 188 --
 src/operator/contrib/allclose_op-inl.h | 160 +
 src/operator/contrib/allclose_op.cc|  86 +++
 src/operator/contrib/allclose_op.cu|  58 ++
 src/operator/contrib/mrcnn_mask_target-inl.h   | 132 
 src/operator/contrib/mrcnn_mask_target.cu  | 278 +
 src/operator/contrib/reset_arrays-inl.h|  92 +++
 src/operator/contrib/reset_arrays.cc   |  74 +++
 .../contrib/{multi_lars.cu => reset_arrays.cu} |  18 +-
 src/operator/mshadow_op.h  |   2 +
 src/operator/nn/concat-inl.h   |  62 ++
 src/operator/numpy/np_elemwise_broadcast_op.cu |   4 +
 src/operator/numpy/np_init_op.cc   |  53 +-
 src/operator/numpy/np_init_op.cu   |   6 +
 src/operator/numpy/np_init_op.h| 132 
 src/operator/numpy/np_matrix_op-inl.h  | 367 
 src/operator/numpy/np_matrix_op.cc | 268 -
 src/operator/numpy/np_matrix_op.cu |  19 +
 src/operator/operator_tune.cc  |   1 +
 src/operator/tensor/init_op.h  |  40 +-
 src/operator/tensor/matrix_op-inl.h| 115 ++--
 tests/python-pytest/onnx/mxnet_export_test.py  |   2 +-
 tests/python/gpu/test_gluon_gpu.py | 136 -
 tests/python/gpu/test_gluon_model_zoo_gpu.py   |  14 +-
 tests/python/gpu/test_operator_gpu.py  |  59 +-
 tests/python/mkl/test_mkldnn.py|  12 +-
 tests/python/unittest/test_contrib_operator.py |  58 ++
 tests/python/unittest/test_gluon.py|  66 +-
 tests/python/unittest/test_gluon_contrib.py|  21 +-
 tests/python/unittest/test_loss.py |  31 +-
 tests/python/unittest/test_ndarray.py  |  27 +-
 tests/python/unittest/test_numpy_gluon.py  |   9 +
 .../python/unittest/test_numpy_interoperability.py | 160 -
 tests/python/unittest/test_numpy_ndarray.py|  23 +
 tests/python/unittest/test_numpy_op.py | 382 
 tests/python/unittest/test_operator.py | 663 +
 tests/python/unittest/test_random.py   |  16 +-
 tests/python/unittest/test_sparse_operator.py  |   4 +-
 tests/python/unittest/test_subgraph.py |   7 -
 tests/utils/notebook_test/__init__.py  |   2 +-
 54 files changed, 4439 insertions(+), 703 deletions(-)
 create mode 100644 src/operator/contrib/allclose_op-inl.h
 create mode 100644 src/operator/contrib/allclose_op.cc

[incubator-mxnet] branch mkldnn-v1.0 updated (b5cdabe -> ba3229b)

2019-10-17 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from b5cdabe  [mkldnn-v1.0] Enable mkldnn cpp-test, copy op, concat op 
(#16503)
 add 1d4ede3  Add mask target generator operator for Mask-RCNN (#16268)
 add 8820220  Adds pip requirements file to nightly gpu ci image (#16472)
 add 1256976  Fix Nightly Tests for Binaries (#16451)
 add 812e504  fix autodoc for spurrious toggles (#16452)
 add 7ce  Fix dtype bug (#16467)
 add 9ab428e  [Doc] Update the download page with 1.5.1 release (#16442)
 add 6e0b1a5  [Numpy] Numpy compatible dstack (#15871)
 add ceebcaf  numpy eye op (#16132)
 add 8222979  Numpy compatible vsplit; minor changes to split (#15983)
 add 8562adc  add numpy op logspace (#15825)
 add 9681197  add numpy op bitwise_xor, hsplit, moveaxis, rot90 (#16257)
 add f9359c3  Fix flakey pylint CI failures (#16462)
 add 67e1e68  Aggregated zero grad (#16446)
 add b1932c0  Move MRCNNMaskTarget op to contrib (#16486)
 add 06438ab  Mxnet allclose (#14443)
 add 0c00a79  Fix optimizer bug for np attribute (#16494)
 add c2bbde7  Tests of NumPy interoperability (#16469)
 add ba3229b  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 ci/docker/Dockerfile.build.ubuntu_nightly_gpu  |   1 +
 ci/other/pylintrc  |   7 +-
 docs/python_docs/_static/autodoc.js|  34 +-
 .../src/_includes/get_started/get_started.html |   6 +-
 .../src/_includes/get_started/pip_snippet.md   |   2 +-
 docs/static_site/src/pages/get_started/download.md |   1 +
 python/mxnet/_numpy_op_doc.py  |  44 ++
 python/mxnet/gluon/parameter.py|  21 +-
 python/mxnet/ndarray/numpy/_op.py  | 416 -
 python/mxnet/numpy/multiarray.py   | 392 +++-
 python/mxnet/numpy_dispatch_protocol.py|   2 +
 python/mxnet/numpy_op_signature.py |   5 +-
 python/mxnet/optimizer/optimizer.py|   2 +-
 python/mxnet/symbol/numpy/_symbol.py   | 362 ++-
 python/mxnet/test_utils.py | 188 --
 src/operator/contrib/allclose_op-inl.h | 160 +
 src/operator/contrib/allclose_op.cc|  86 +++
 src/operator/contrib/allclose_op.cu|  58 ++
 src/operator/contrib/mrcnn_mask_target-inl.h   | 132 
 src/operator/contrib/mrcnn_mask_target.cu  | 278 +
 src/operator/contrib/reset_arrays-inl.h|  92 +++
 src/operator/contrib/reset_arrays.cc   |  74 +++
 .../contrib/{multi_lars.cu => reset_arrays.cu} |  18 +-
 src/operator/mshadow_op.h  |   2 +
 src/operator/nn/concat-inl.h   |  62 ++
 src/operator/numpy/np_elemwise_broadcast_op.cu |   4 +
 src/operator/numpy/np_init_op.cc   |  53 +-
 src/operator/numpy/np_init_op.cu   |   6 +
 src/operator/numpy/np_init_op.h| 132 
 src/operator/numpy/np_matrix_op-inl.h  | 367 
 src/operator/numpy/np_matrix_op.cc | 268 -
 src/operator/numpy/np_matrix_op.cu |  19 +
 src/operator/operator_tune.cc  |   1 +
 src/operator/tensor/init_op.h  |  40 +-
 src/operator/tensor/matrix_op-inl.h| 115 ++--
 tests/python-pytest/onnx/mxnet_export_test.py  |   2 +-
 tests/python/gpu/test_gluon_gpu.py | 136 -
 tests/python/gpu/test_gluon_model_zoo_gpu.py   |  14 +-
 tests/python/gpu/test_operator_gpu.py  |  59 +-
 tests/python/mkl/test_mkldnn.py|  12 +-
 tests/python/unittest/test_contrib_operator.py |  58 ++
 tests/python/unittest/test_gluon.py|  66 +-
 tests/python/unittest/test_gluon_contrib.py|  21 +-
 tests/python/unittest/test_loss.py |  31 +-
 tests/python/unittest/test_ndarray.py  |  27 +-
 tests/python/unittest/test_numpy_gluon.py  |   9 +
 .../python/unittest/test_numpy_interoperability.py | 160 -
 tests/python/unittest/test_numpy_ndarray.py|  23 +
 tests/python/unittest/test_numpy_op.py | 382 
 tests/python/unittest/test_operator.py | 663 +
 tests/python/unittest/test_random.py   |  16 +-
 tests/python/unittest/test_sparse_operator.py  |   4 +-
 tests/python/unittest/test_subgraph.py |   7 -
 tests/utils/notebook_test/__init__.py  |   2 +-
 54 files changed, 4439 insertions(+), 703 deletions(-)
 create mode 100644 src/operator/contrib/allclose_op-inl.h
 create mode 100644 src/operator/contrib/allclose_op.cc

[incubator-mxnet] branch mkldnn-v1.0 updated (9f77575 -> 43e35a9)

2019-10-14 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 9f77575  [mkldnn-v1.0] Update enabling flag for MKL dropout (#16433)
 add d8193c6  Update add_op_in_backend.md (#16403)
 add 7f5e687  numpy-compatible histogram (#16266)
 add ca30ba8  Pseudo 2D transpose kernel (#16229)
 add d2d76dc  increase docker cache timeout (#16430)
 add 4dee4ee  Fix mkldnn reshape (#16455)
 add 1e8cc90  [BUGFIX] Minor type issues in Squeeze (#16448)
 add 858a52e  Fix large array tests (#16328)
 add 6d6e46b  Comparison ops implemented using mshadow (#16414)
 add 43e35a9  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 CMakeLists.txt |   6 +-
 ci/docker_cache.py |   2 +-
 .../src/pages/api/faq/add_op_in_backend.md |   2 +-
 python/mxnet/_numpy_op_doc.py  |  51 +++
 python/mxnet/ndarray/numpy/_op.py  |  53 +++-
 python/mxnet/numpy/multiarray.py   |  53 +++-
 python/mxnet/numpy_extension/__init__.py   |   2 +-
 python/mxnet/symbol/numpy/_symbol.py   |  85 -
 python/mxnet/test_utils.py |  33 +-
 python/mxnet/util.py   |  61 
 src/common/utils.h |  15 +
 src/ndarray/ndarray_function.cc|  13 +-
 src/ndarray/ndarray_function.cu|   4 -
 src/operator/contrib/index_copy-inl.h  |   2 +-
 src/operator/contrib/index_copy.cc |   4 +-
 src/operator/leaky_relu-inl.h  |   2 +-
 src/operator/mshadow_op.h  |  30 ++
 src/operator/mxnet_op.h|  20 ++
 src/operator/nn/dropout-inl.h  |   4 +-
 src/operator/nn/mkldnn/mkldnn_base-inl.h   |   1 -
 src/operator/nn/mkldnn/mkldnn_base.cc  |   6 +-
 src/operator/nn/mkldnn/mkldnn_expand_dims.cc   |  70 -
 src/operator/nn/mkldnn/mkldnn_flatten-inl.h|  48 ---
 src/operator/nn/mkldnn/mkldnn_flatten.cc   |  79 -
 src/operator/nn/mkldnn/mkldnn_ops-inl.h|  29 +-
 src/operator/nn/mkldnn/mkldnn_reshape-inl.h| 152 -
 src/operator/nn/mkldnn/mkldnn_reshape.cc   |  95 +++---
 .../numpy/np_elemwise_broadcast_logic_op.cc| 301 ++
 .../numpy/np_elemwise_broadcast_logic_op.cu|  60 
 src/operator/numpy/np_elemwise_broadcast_op.cc | 223 -
 src/operator/numpy/np_elemwise_unary_op_basic.cc   |  65 ++--
 src/operator/numpy/np_elemwise_unary_op_basic.cu   |   9 +-
 src/operator/numpy/np_matrix_op-inl.h  |   4 +-
 src/operator/numpy/np_matrix_op.cc |  57 +++-
 src/operator/operator_tune.cc  |  10 +
 .../mkldnn/mkldnn_quantized_flatten.cc |   4 +-
 src/operator/tensor/elemwise_binary_broadcast_op.h |  42 ++-
 src/operator/tensor/elemwise_binary_op.h   |  26 ++
 src/operator/tensor/elemwise_binary_scalar_op.h|  20 ++
 src/operator/tensor/elemwise_unary_op.h|  21 +-
 src/operator/tensor/elemwise_unary_op_basic.cc |   1 +
 src/operator/tensor/histogram.cc   |   1 +
 src/operator/tensor/matrix_op-inl.h|  17 +-
 src/operator/tensor/matrix_op.cc   | 207 ++--
 src/operator/tensor/pseudo2DTranspose_op-inl.cuh   | 348 +
 tests/nightly/test_large_array.py  | 147 -
 tests/nightly/test_large_vector.py |  20 +-
 tests/python/unittest/common.py|  21 ++
 tests/python/unittest/test_numpy_ndarray.py|  33 +-
 tests/python/unittest/test_numpy_op.py |  45 ++-
 tests/python/unittest/test_operator.py |  39 +++
 51 files changed, 1667 insertions(+), 976 deletions(-)
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_expand_dims.cc
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_flatten-inl.h
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_flatten.cc
 create mode 100644 src/operator/numpy/np_elemwise_broadcast_logic_op.cc
 create mode 100644 src/operator/numpy/np_elemwise_broadcast_logic_op.cu
 create mode 100644 src/operator/tensor/pseudo2DTranspose_op-inl.cuh



[incubator-mxnet] branch mkldnn-v1.0 updated (9f77575 -> 43e35a9)

2019-10-14 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 9f77575  [mkldnn-v1.0] Update enabling flag for MKL dropout (#16433)
 add d8193c6  Update add_op_in_backend.md (#16403)
 add 7f5e687  numpy-compatible histogram (#16266)
 add ca30ba8  Pseudo 2D transpose kernel (#16229)
 add d2d76dc  increase docker cache timeout (#16430)
 add 4dee4ee  Fix mkldnn reshape (#16455)
 add 1e8cc90  [BUGFIX] Minor type issues in Squeeze (#16448)
 add 858a52e  Fix large array tests (#16328)
 add 6d6e46b  Comparison ops implemented using mshadow (#16414)
 add 43e35a9  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 CMakeLists.txt |   6 +-
 ci/docker_cache.py |   2 +-
 .../src/pages/api/faq/add_op_in_backend.md |   2 +-
 python/mxnet/_numpy_op_doc.py  |  51 +++
 python/mxnet/ndarray/numpy/_op.py  |  53 +++-
 python/mxnet/numpy/multiarray.py   |  53 +++-
 python/mxnet/numpy_extension/__init__.py   |   2 +-
 python/mxnet/symbol/numpy/_symbol.py   |  85 -
 python/mxnet/test_utils.py |  33 +-
 python/mxnet/util.py   |  61 
 src/common/utils.h |  15 +
 src/ndarray/ndarray_function.cc|  13 +-
 src/ndarray/ndarray_function.cu|   4 -
 src/operator/contrib/index_copy-inl.h  |   2 +-
 src/operator/contrib/index_copy.cc |   4 +-
 src/operator/leaky_relu-inl.h  |   2 +-
 src/operator/mshadow_op.h  |  30 ++
 src/operator/mxnet_op.h|  20 ++
 src/operator/nn/dropout-inl.h  |   4 +-
 src/operator/nn/mkldnn/mkldnn_base-inl.h   |   1 -
 src/operator/nn/mkldnn/mkldnn_base.cc  |   6 +-
 src/operator/nn/mkldnn/mkldnn_expand_dims.cc   |  70 -
 src/operator/nn/mkldnn/mkldnn_flatten-inl.h|  48 ---
 src/operator/nn/mkldnn/mkldnn_flatten.cc   |  79 -
 src/operator/nn/mkldnn/mkldnn_ops-inl.h|  29 +-
 src/operator/nn/mkldnn/mkldnn_reshape-inl.h| 152 -
 src/operator/nn/mkldnn/mkldnn_reshape.cc   |  95 +++---
 .../numpy/np_elemwise_broadcast_logic_op.cc| 301 ++
 .../numpy/np_elemwise_broadcast_logic_op.cu|  60 
 src/operator/numpy/np_elemwise_broadcast_op.cc | 223 -
 src/operator/numpy/np_elemwise_unary_op_basic.cc   |  65 ++--
 src/operator/numpy/np_elemwise_unary_op_basic.cu   |   9 +-
 src/operator/numpy/np_matrix_op-inl.h  |   4 +-
 src/operator/numpy/np_matrix_op.cc |  57 +++-
 src/operator/operator_tune.cc  |  10 +
 .../mkldnn/mkldnn_quantized_flatten.cc |   4 +-
 src/operator/tensor/elemwise_binary_broadcast_op.h |  42 ++-
 src/operator/tensor/elemwise_binary_op.h   |  26 ++
 src/operator/tensor/elemwise_binary_scalar_op.h|  20 ++
 src/operator/tensor/elemwise_unary_op.h|  21 +-
 src/operator/tensor/elemwise_unary_op_basic.cc |   1 +
 src/operator/tensor/histogram.cc   |   1 +
 src/operator/tensor/matrix_op-inl.h|  17 +-
 src/operator/tensor/matrix_op.cc   | 207 ++--
 src/operator/tensor/pseudo2DTranspose_op-inl.cuh   | 348 +
 tests/nightly/test_large_array.py  | 147 -
 tests/nightly/test_large_vector.py |  20 +-
 tests/python/unittest/common.py|  21 ++
 tests/python/unittest/test_numpy_ndarray.py|  33 +-
 tests/python/unittest/test_numpy_op.py |  45 ++-
 tests/python/unittest/test_operator.py |  39 +++
 51 files changed, 1667 insertions(+), 976 deletions(-)
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_expand_dims.cc
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_flatten-inl.h
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_flatten.cc
 create mode 100644 src/operator/numpy/np_elemwise_broadcast_logic_op.cc
 create mode 100644 src/operator/numpy/np_elemwise_broadcast_logic_op.cu
 create mode 100644 src/operator/tensor/pseudo2DTranspose_op-inl.cuh



[incubator-mxnet] branch mkldnn-v1.0 updated (9f77575 -> 43e35a9)

2019-10-14 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 9f77575  [mkldnn-v1.0] Update enabling flag for MKL dropout (#16433)
 add d8193c6  Update add_op_in_backend.md (#16403)
 add 7f5e687  numpy-compatible histogram (#16266)
 add ca30ba8  Pseudo 2D transpose kernel (#16229)
 add d2d76dc  increase docker cache timeout (#16430)
 add 4dee4ee  Fix mkldnn reshape (#16455)
 add 1e8cc90  [BUGFIX] Minor type issues in Squeeze (#16448)
 add 858a52e  Fix large array tests (#16328)
 add 6d6e46b  Comparison ops implemented using mshadow (#16414)
 add 43e35a9  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 CMakeLists.txt |   6 +-
 ci/docker_cache.py |   2 +-
 .../src/pages/api/faq/add_op_in_backend.md |   2 +-
 python/mxnet/_numpy_op_doc.py  |  51 +++
 python/mxnet/ndarray/numpy/_op.py  |  53 +++-
 python/mxnet/numpy/multiarray.py   |  53 +++-
 python/mxnet/numpy_extension/__init__.py   |   2 +-
 python/mxnet/symbol/numpy/_symbol.py   |  85 -
 python/mxnet/test_utils.py |  33 +-
 python/mxnet/util.py   |  61 
 src/common/utils.h |  15 +
 src/ndarray/ndarray_function.cc|  13 +-
 src/ndarray/ndarray_function.cu|   4 -
 src/operator/contrib/index_copy-inl.h  |   2 +-
 src/operator/contrib/index_copy.cc |   4 +-
 src/operator/leaky_relu-inl.h  |   2 +-
 src/operator/mshadow_op.h  |  30 ++
 src/operator/mxnet_op.h|  20 ++
 src/operator/nn/dropout-inl.h  |   4 +-
 src/operator/nn/mkldnn/mkldnn_base-inl.h   |   1 -
 src/operator/nn/mkldnn/mkldnn_base.cc  |   6 +-
 src/operator/nn/mkldnn/mkldnn_expand_dims.cc   |  70 -
 src/operator/nn/mkldnn/mkldnn_flatten-inl.h|  48 ---
 src/operator/nn/mkldnn/mkldnn_flatten.cc   |  79 -
 src/operator/nn/mkldnn/mkldnn_ops-inl.h|  29 +-
 src/operator/nn/mkldnn/mkldnn_reshape-inl.h| 152 -
 src/operator/nn/mkldnn/mkldnn_reshape.cc   |  95 +++---
 .../numpy/np_elemwise_broadcast_logic_op.cc| 301 ++
 .../numpy/np_elemwise_broadcast_logic_op.cu|  60 
 src/operator/numpy/np_elemwise_broadcast_op.cc | 223 -
 src/operator/numpy/np_elemwise_unary_op_basic.cc   |  65 ++--
 src/operator/numpy/np_elemwise_unary_op_basic.cu   |   9 +-
 src/operator/numpy/np_matrix_op-inl.h  |   4 +-
 src/operator/numpy/np_matrix_op.cc |  57 +++-
 src/operator/operator_tune.cc  |  10 +
 .../mkldnn/mkldnn_quantized_flatten.cc |   4 +-
 src/operator/tensor/elemwise_binary_broadcast_op.h |  42 ++-
 src/operator/tensor/elemwise_binary_op.h   |  26 ++
 src/operator/tensor/elemwise_binary_scalar_op.h|  20 ++
 src/operator/tensor/elemwise_unary_op.h|  21 +-
 src/operator/tensor/elemwise_unary_op_basic.cc |   1 +
 src/operator/tensor/histogram.cc   |   1 +
 src/operator/tensor/matrix_op-inl.h|  17 +-
 src/operator/tensor/matrix_op.cc   | 207 ++--
 src/operator/tensor/pseudo2DTranspose_op-inl.cuh   | 348 +
 tests/nightly/test_large_array.py  | 147 -
 tests/nightly/test_large_vector.py |  20 +-
 tests/python/unittest/common.py|  21 ++
 tests/python/unittest/test_numpy_ndarray.py|  33 +-
 tests/python/unittest/test_numpy_op.py |  45 ++-
 tests/python/unittest/test_operator.py |  39 +++
 51 files changed, 1667 insertions(+), 976 deletions(-)
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_expand_dims.cc
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_flatten-inl.h
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_flatten.cc
 create mode 100644 src/operator/numpy/np_elemwise_broadcast_logic_op.cc
 create mode 100644 src/operator/numpy/np_elemwise_broadcast_logic_op.cu
 create mode 100644 src/operator/tensor/pseudo2DTranspose_op-inl.cuh



[incubator-mxnet] branch mkldnn-v1.0 updated (9f77575 -> 43e35a9)

2019-10-14 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 9f77575  [mkldnn-v1.0] Update enabling flag for MKL dropout (#16433)
 add d8193c6  Update add_op_in_backend.md (#16403)
 add 7f5e687  numpy-compatible histogram (#16266)
 add ca30ba8  Pseudo 2D transpose kernel (#16229)
 add d2d76dc  increase docker cache timeout (#16430)
 add 4dee4ee  Fix mkldnn reshape (#16455)
 add 1e8cc90  [BUGFIX] Minor type issues in Squeeze (#16448)
 add 858a52e  Fix large array tests (#16328)
 add 6d6e46b  Comparison ops implemented using mshadow (#16414)
 add 43e35a9  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

No new revisions were added by this update.

Summary of changes:
 CMakeLists.txt |   6 +-
 ci/docker_cache.py |   2 +-
 .../src/pages/api/faq/add_op_in_backend.md |   2 +-
 python/mxnet/_numpy_op_doc.py  |  51 +++
 python/mxnet/ndarray/numpy/_op.py  |  53 +++-
 python/mxnet/numpy/multiarray.py   |  53 +++-
 python/mxnet/numpy_extension/__init__.py   |   2 +-
 python/mxnet/symbol/numpy/_symbol.py   |  85 -
 python/mxnet/test_utils.py |  33 +-
 python/mxnet/util.py   |  61 
 src/common/utils.h |  15 +
 src/ndarray/ndarray_function.cc|  13 +-
 src/ndarray/ndarray_function.cu|   4 -
 src/operator/contrib/index_copy-inl.h  |   2 +-
 src/operator/contrib/index_copy.cc |   4 +-
 src/operator/leaky_relu-inl.h  |   2 +-
 src/operator/mshadow_op.h  |  30 ++
 src/operator/mxnet_op.h|  20 ++
 src/operator/nn/dropout-inl.h  |   4 +-
 src/operator/nn/mkldnn/mkldnn_base-inl.h   |   1 -
 src/operator/nn/mkldnn/mkldnn_base.cc  |   6 +-
 src/operator/nn/mkldnn/mkldnn_expand_dims.cc   |  70 -
 src/operator/nn/mkldnn/mkldnn_flatten-inl.h|  48 ---
 src/operator/nn/mkldnn/mkldnn_flatten.cc   |  79 -
 src/operator/nn/mkldnn/mkldnn_ops-inl.h|  29 +-
 src/operator/nn/mkldnn/mkldnn_reshape-inl.h| 152 -
 src/operator/nn/mkldnn/mkldnn_reshape.cc   |  95 +++---
 .../numpy/np_elemwise_broadcast_logic_op.cc| 301 ++
 .../numpy/np_elemwise_broadcast_logic_op.cu|  60 
 src/operator/numpy/np_elemwise_broadcast_op.cc | 223 -
 src/operator/numpy/np_elemwise_unary_op_basic.cc   |  65 ++--
 src/operator/numpy/np_elemwise_unary_op_basic.cu   |   9 +-
 src/operator/numpy/np_matrix_op-inl.h  |   4 +-
 src/operator/numpy/np_matrix_op.cc |  57 +++-
 src/operator/operator_tune.cc  |  10 +
 .../mkldnn/mkldnn_quantized_flatten.cc |   4 +-
 src/operator/tensor/elemwise_binary_broadcast_op.h |  42 ++-
 src/operator/tensor/elemwise_binary_op.h   |  26 ++
 src/operator/tensor/elemwise_binary_scalar_op.h|  20 ++
 src/operator/tensor/elemwise_unary_op.h|  21 +-
 src/operator/tensor/elemwise_unary_op_basic.cc |   1 +
 src/operator/tensor/histogram.cc   |   1 +
 src/operator/tensor/matrix_op-inl.h|  17 +-
 src/operator/tensor/matrix_op.cc   | 207 ++--
 src/operator/tensor/pseudo2DTranspose_op-inl.cuh   | 348 +
 tests/nightly/test_large_array.py  | 147 -
 tests/nightly/test_large_vector.py |  20 +-
 tests/python/unittest/common.py|  21 ++
 tests/python/unittest/test_numpy_ndarray.py|  33 +-
 tests/python/unittest/test_numpy_op.py |  45 ++-
 tests/python/unittest/test_operator.py |  39 +++
 51 files changed, 1667 insertions(+), 976 deletions(-)
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_expand_dims.cc
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_flatten-inl.h
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_flatten.cc
 create mode 100644 src/operator/numpy/np_elemwise_broadcast_logic_op.cc
 create mode 100644 src/operator/numpy/np_elemwise_broadcast_logic_op.cu
 create mode 100644 src/operator/tensor/pseudo2DTranspose_op-inl.cuh



[incubator-mxnet] branch mkldnn-v1.0 updated (9f77575 -> 43e35a9)

2019-10-14 Thread taolv
This is an automated email from the ASF dual-hosted git repository.

taolv pushed a change to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 9f77575  [mkldnn-v1.0] Update enabling flag for MKL dropout (#16433)
 add d8193c6  Update add_op_in_backend.md (#16403)
 add 7f5e687  numpy-compatible histogram (#16266)
 add ca30ba8  Pseudo 2D transpose kernel (#16229)
 add d2d76dc  increase docker cache timeout (#16430)
 add 4dee4ee  Fix mkldnn reshape (#16455)
 add 1e8cc90  [BUGFIX] Minor type issues in Squeeze (#16448)
 add 858a52e  Fix large array tests (#16328)
 add 6d6e46b  Comparison ops implemented using mshadow (#16414)
 new 43e35a9  Merge remote-tracking branch 'origin/master' into mkldnn-v1.0

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 CMakeLists.txt |   6 +-
 ci/docker_cache.py |   2 +-
 .../src/pages/api/faq/add_op_in_backend.md |   2 +-
 python/mxnet/_numpy_op_doc.py  |  51 +++
 python/mxnet/ndarray/numpy/_op.py  |  53 +++-
 python/mxnet/numpy/multiarray.py   |  53 +++-
 python/mxnet/numpy_extension/__init__.py   |   2 +-
 python/mxnet/symbol/numpy/_symbol.py   |  85 -
 python/mxnet/test_utils.py |  33 +-
 python/mxnet/util.py   |  61 
 src/common/utils.h |  15 +
 src/ndarray/ndarray_function.cc|  13 +-
 src/ndarray/ndarray_function.cu|   4 -
 src/operator/contrib/index_copy-inl.h  |   2 +-
 src/operator/contrib/index_copy.cc |   4 +-
 src/operator/leaky_relu-inl.h  |   2 +-
 src/operator/mshadow_op.h  |  30 ++
 src/operator/mxnet_op.h|  20 ++
 src/operator/nn/dropout-inl.h  |   4 +-
 src/operator/nn/mkldnn/mkldnn_base-inl.h   |   1 -
 src/operator/nn/mkldnn/mkldnn_base.cc  |   6 +-
 src/operator/nn/mkldnn/mkldnn_expand_dims.cc   |  70 -
 src/operator/nn/mkldnn/mkldnn_flatten-inl.h|  48 ---
 src/operator/nn/mkldnn/mkldnn_flatten.cc   |  79 -
 src/operator/nn/mkldnn/mkldnn_ops-inl.h|  29 +-
 src/operator/nn/mkldnn/mkldnn_reshape-inl.h| 152 -
 src/operator/nn/mkldnn/mkldnn_reshape.cc   |  95 +++---
 .../numpy/np_elemwise_broadcast_logic_op.cc| 301 ++
 .../numpy/np_elemwise_broadcast_logic_op.cu|  60 
 src/operator/numpy/np_elemwise_broadcast_op.cc | 223 -
 src/operator/numpy/np_elemwise_unary_op_basic.cc   |  65 ++--
 src/operator/numpy/np_elemwise_unary_op_basic.cu   |   9 +-
 src/operator/numpy/np_matrix_op-inl.h  |   4 +-
 src/operator/numpy/np_matrix_op.cc |  57 +++-
 src/operator/operator_tune.cc  |  10 +
 .../mkldnn/mkldnn_quantized_flatten.cc |   4 +-
 src/operator/tensor/elemwise_binary_broadcast_op.h |  42 ++-
 src/operator/tensor/elemwise_binary_op.h   |  26 ++
 src/operator/tensor/elemwise_binary_scalar_op.h|  20 ++
 src/operator/tensor/elemwise_unary_op.h|  21 +-
 src/operator/tensor/elemwise_unary_op_basic.cc |   1 +
 src/operator/tensor/histogram.cc   |   1 +
 src/operator/tensor/matrix_op-inl.h|  17 +-
 src/operator/tensor/matrix_op.cc   | 207 ++--
 src/operator/tensor/pseudo2DTranspose_op-inl.cuh   | 348 +
 tests/nightly/test_large_array.py  | 147 -
 tests/nightly/test_large_vector.py |  20 +-
 tests/python/unittest/common.py|  21 ++
 tests/python/unittest/test_numpy_ndarray.py|  33 +-
 tests/python/unittest/test_numpy_op.py |  45 ++-
 tests/python/unittest/test_operator.py |  39 +++
 51 files changed, 1667 insertions(+), 976 deletions(-)
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_expand_dims.cc
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_flatten-inl.h
 delete mode 100644 src/operator/nn/mkldnn/mkldnn_flatten.cc
 create mode 100644 src/operator/numpy/np_elemwise_broadcast_logic_op.cc
 create mode 100644 src/operator/numpy/np_elemwise_broadcast_logic_op.cu
 create mode 100644 src/operator/tensor/pseudo2DTranspose_op-inl.cuh



  1   2   >