[GitHub] [incubator-mxnet] kshitij12345 commented on a change in pull request #15480: [MXNET-978] Higher Order Gradient Support `elemwise_mul`, `elemwise_add`.

2019-07-07 Thread GitBox
kshitij12345 commented on a change in pull request #15480: [MXNET-978] Higher 
Order Gradient Support `elemwise_mul`, `elemwise_add`.
URL: https://github.com/apache/incubator-mxnet/pull/15480#discussion_r300922295
 
 

 ##
 File path: src/operator/tensor/elemwise_binary_op_basic.cc
 ##
 @@ -251,7 +251,31 @@ NNVM_REGISTER_OP(_backward_mul)
 .set_attr("FCompute", ElemwiseBinaryOp::BackwardUseIn<
   cpu, mshadow_op::right, mshadow_op::left>)
 .set_attr("FComputeEx", ElemwiseBinaryOp::BackwardUseInEx<
-  cpu, mshadow_op::right, mshadow_op::left>);
+  cpu, mshadow_op::right, mshadow_op::left>)
+.set_attr("FGradient",
+  [](const nnvm::NodePtr& n, const std::vector& ograds) {
+// z = x * y
+// NodeEntry{n, 0, 0} : z_grad * y
+// NodeEntry{n, 1, 0} : z_grad * x
+// n->inputs[0] : z_grad
+// n->inputs[1] : x
+// n->inputs[2] : y
+// ograds[0] : head_grads
+// f(x, y) = x * y
+// dx = z_grad * y, dy = z_grad * x
+// d2x = 0, d2y = 0, dz_grad = dx + dy = y + x
+auto dz_grad = MakeNode("elemwise_add", n->attrs.name + "_y_add_x",
+{n->inputs[2], n->inputs[1]}, nullptr, );
+
+std::vector ret;
+ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + 
"_backward_grad_grad",
+  {ograds[0], nnvm::NodeEntry{dz_grad}}, nullptr, 
));
+ret.emplace_back(MakeNode("zeros_like", n->attrs.name + 
"_backward_grad_grad_x",
 
 Review comment:
   Ah. Right. Thank You.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet] branch master updated: fix fp32 flatten issue (#15351)

2019-07-07 Thread patriczhao
This is an automated email from the ASF dual-hosted git repository.

patriczhao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
 new 091fece  fix fp32 flatten issue (#15351)
091fece is described below

commit 091fece5431b43c931568d279c1dd9d664318c36
Author: Wuxun Zhang 
AuthorDate: Mon Jul 8 10:07:37 2019 +0800

fix fp32 flatten issue (#15351)

* Fix flatten issue before slice op

* fix cpplint

* address comments

* retrigger CI

* trigger CI

* retrigger CI

* use SupportMKLDNNReshape and update operator list
---
 docs/tutorials/mkldnn/operator_list.md  |   2 +
 src/operator/nn/mkldnn/mkldnn_base-inl.h|   2 +
 src/operator/nn/mkldnn/mkldnn_flatten.cc|  87 +
 src/operator/nn/mkldnn/mkldnn_ops-inl.h |   9 +-
 src/operator/nn/mkldnn/mkldnn_reshape-inl.h |  68 ++
 src/operator/nn/mkldnn/mkldnn_reshape.cc| 185 +---
 src/operator/tensor/matrix_op.cc|  20 ++-
 tests/python/gpu/test_operator_gpu.py   |  14 +++
 tests/python/mkl/test_mkldnn.py |  20 +++
 9 files changed, 296 insertions(+), 111 deletions(-)

diff --git a/docs/tutorials/mkldnn/operator_list.md 
b/docs/tutorials/mkldnn/operator_list.md
index 4958f8d..0ef0f29 100644
--- a/docs/tutorials/mkldnn/operator_list.md
+++ b/docs/tutorials/mkldnn/operator_list.md
@@ -44,6 +44,8 @@ To help users understanding MKL-DNN backend better, the 
following table summariz
 | **elemwise_add**   | 1D-4D input| Y| 
Y  | Y  |
 | **Concat** | 1D-4D input| Y| 
Y  | Y  |
 | **slice**  | 1D-4D input| N| 
Y  | N  |
+| **Reshape**| 1D-4D input| N| 
Y  | N  |
+| **Flatten**| 1D-4D input| N| 
Y  | N  |
 | **Quantization**   | 1D-4D input| N| 
N  | Y  |
 | **Dequantization** | 1D-4D input| N| 
N  | Y  |
 | **Requantization** | 1D-4D input| N| 
N  | Y  |
diff --git a/src/operator/nn/mkldnn/mkldnn_base-inl.h 
b/src/operator/nn/mkldnn/mkldnn_base-inl.h
index 5670983..e01b7b1 100644
--- a/src/operator/nn/mkldnn/mkldnn_base-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_base-inl.h
@@ -176,6 +176,7 @@ struct DeconvolutionParam;
 struct SoftmaxParam;
 struct SoftmaxOutputParam;
 struct TransposeParam;
+struct ReshapeParam;
 bool SupportMKLDNNAct(const ActivationParam& param);
 bool SupportMKLDNNAct(const ActivationParam& param, const NDArray );
 bool SupportQuantizedMKLDNNAct(const ActivationParam );
@@ -184,6 +185,7 @@ bool SupportMKLDNNDeconv(const DeconvolutionParam& params, 
const NDArray )
 bool SupportMKLDNNSoftmax(const SoftmaxParam& param, const NDArray , 
const NDArray );
 bool SupportMKLDNNSoftmaxOutput(const SoftmaxOutputParam );
 bool SupportMKLDNNTranspose(const TransposeParam& param, const NDArray );
+bool SupportMKLDNNReshape(const ReshapeParam , const NDArray );
 }  // namespace op
 
 static int GetTypeSize(int dtype) {
diff --git a/src/operator/nn/mkldnn/mkldnn_flatten.cc 
b/src/operator/nn/mkldnn/mkldnn_flatten.cc
new file mode 100644
index 000..fdc02f9
--- /dev/null
+++ b/src/operator/nn/mkldnn/mkldnn_flatten.cc
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file mkldnn_flatten.cc
+ * \brief Implement flatten operator by using mkldnn reorder primitive
+ * \author Wuxun Zhang
+*/
+
+#if MXNET_USE_MKLDNN == 1
+
+#include "mkldnn_reshape-inl.h"
+
+namespace mxnet {
+namespace op {
+
+class MKLDNNFlattenFwd : public MKLDNNReshapeFwd {
+ public:
+  explicit MKLDNNFlattenFwd(const OpReqType ,
+const NDArray ,
+const 

[GitHub] [incubator-mxnet] pengzhao-intel commented on issue #15351: fix fp32 flatten issue

2019-07-07 Thread GitBox
pengzhao-intel commented on issue #15351: fix fp32 flatten issue
URL: https://github.com/apache/incubator-mxnet/pull/15351#issuecomment-509053946
 
 
   Thanks for your contribution. Merging now.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] pengzhao-intel merged pull request #15351: fix fp32 flatten issue

2019-07-07 Thread GitBox
pengzhao-intel merged pull request #15351: fix fp32 flatten issue
URL: https://github.com/apache/incubator-mxnet/pull/15351
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] ciyongch commented on issue #15429: Operator Performance Regression on CPU

2019-07-07 Thread GitBox
ciyongch commented on issue #15429: Operator Performance Regression on CPU
URL: 
https://github.com/apache/incubator-mxnet/issues/15429#issuecomment-509050325
 
 
   @roywei does it mean the operator profiling results with current profiler 
module is not accurate?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet-site] branch asf-site updated: Bump the publish timestamp.

2019-07-07 Thread marcoabreu
This is an automated email from the ASF dual-hosted git repository.

marcoabreu pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new fc0b96c  Bump the publish timestamp.
fc0b96c is described below

commit fc0b96cc25c4d4ff9d3a1ab541ffe380cef4317a
Author: mxnet-ci 
AuthorDate: Mon Jul 8 01:25:36 2019 +

Bump the publish timestamp.
---
 date.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/date.txt b/date.txt
new file mode 100644
index 000..e92dedd
--- /dev/null
+++ b/date.txt
@@ -0,0 +1 @@
+Mon Jul  8 01:25:36 UTC 2019



[GitHub] [incubator-mxnet] ZhennanQin commented on issue #15465: [RFC] Integrate TVM into Apache MXNet

2019-07-07 Thread GitBox
ZhennanQin commented on issue #15465: [RFC] Integrate TVM into Apache MXNet
URL: 
https://github.com/apache/incubator-mxnet/issues/15465#issuecomment-509039433
 
 
   @junrushao1994 The plugin idea looks too fantastic for me. Of course, If we 
can achieve that, then integrated TVM is able to provide very good performance.
   
   Here's another question about threading management. As you may know, MXNet 
is using openmp as threading management protocol. Also threaded engine can 
create many threaded workers to execute operators in parallel. So the total 
thread number on threaded engine is (total_workers * OMP_NUM_THREADS). AFAIK, 
TVM runtime has its own threading pool, will TVM switch to use openmp as 
threading management protocol after integration? can we run 2 TVM generated 
operators in different worker at same time?
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] junrushao1994 commented on issue #15465: [RFC] Integrate TVM into Apache MXNet

2019-07-07 Thread GitBox
junrushao1994 commented on issue #15465: [RFC] Integrate TVM into Apache MXNet
URL: 
https://github.com/apache/incubator-mxnet/issues/15465#issuecomment-509036689
 
 
   @cjolivier01 yep! The TVM compiler generates llvm IR which are compiled as 
binary and loaded as a C++ module, then could be used in all frontend languages 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] cjolivier01 commented on issue #15465: [RFC] Integrate TVM into Apache MXNet

2019-07-07 Thread GitBox
cjolivier01 commented on issue #15465: [RFC] Integrate TVM into Apache MXNet
URL: 
https://github.com/apache/incubator-mxnet/issues/15465#issuecomment-509033609
 
 
   dumb question: Are operators written this way available in any way to 
non-python languages such as lua, scala, etc?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest merged pull request #15413: [MXNET-978] Higher Order Gradient Support `reciprocal`, `abs`.

2019-07-07 Thread GitBox
apeforest merged pull request #15413: [MXNET-978] Higher Order Gradient Support 
`reciprocal`, `abs`.
URL: https://github.com/apache/incubator-mxnet/pull/15413
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet] branch master updated: [MXNET-978] Higher Order Gradient Support `reciprocal`, `abs`. (#15413)

2019-07-07 Thread apeforest
This is an automated email from the ASF dual-hosted git repository.

apeforest pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
 new a3ae309  [MXNET-978] Higher Order Gradient Support `reciprocal`, 
`abs`. (#15413)
a3ae309 is described below

commit a3ae30979989f488cd933c2fbb6416a4e187de9d
Author: kshitij12345 
AuthorDate: Mon Jul 8 02:35:09 2019 +0530

[MXNET-978] Higher Order Gradient Support `reciprocal`, `abs`. (#15413)

* add higher order support for reciprocal and abs

* add relevant tests

* address comments

* fix extra line in tests.
* fix missing space.
* fix incorrect comment.
---
 src/operator/tensor/elemwise_unary_op_basic.cc  | 54 -
 tests/python/unittest/test_higher_order_grad.py | 27 +
 2 files changed, 79 insertions(+), 2 deletions(-)

diff --git a/src/operator/tensor/elemwise_unary_op_basic.cc 
b/src/operator/tensor/elemwise_unary_op_basic.cc
index 26c7408..6da384d 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cc
+++ b/src/operator/tensor/elemwise_unary_op_basic.cc
@@ -717,7 +717,38 @@ Example::
 
 MXNET_OPERATOR_REGISTER_BINARY(_backward_reciprocal)
 .set_attr("FCompute",
-  ElemwiseBinaryOp::Compute >);
+  ElemwiseBinaryOp::Compute >)
+.set_attr("FGradient",
+  [](const nnvm::NodePtr& n, const std::vector& ograds) {
+// ograds[0]: dL/dxgrad
+// inputs[0]: dL/dy
+// inputs[1]: x
+// f(x) = y = 1/x
+// f'(x) = -1/x^2
+// f''(x) = 2/x^3 = -2 * (f'(x) * f(x))
+
+const std::unordered_map args = {{"scalar", 
"-2.0"}};
+
+auto dydx_mul_dldy = nnvm::NodeEntry{n};  // f'(x) * head_grads
+auto dydx = MakeNode("elemwise_div", n->attrs.name + "_dydx",
+ {dydx_mul_dldy, n->inputs[0]}, nullptr, );
+auto fx = MakeNode("reciprocal", n->attrs.name + "_fx",
+   {n->inputs[1]}, nullptr, );
+
+auto d2ydx2_mid = MakeNode("elemwise_mul", n->attrs.name + "_d2ydx2_mid",
+   {dydx_mul_dldy, nnvm::NodeEntry{fx}}, nullptr, 
);
+
+auto d2ydx2 = MakeNode("_mul_scalar", n->attrs.name + "_d2ydx2",
+   {nnvm::NodeEntry{d2ydx2_mid}}, , );
+
+std::vector ret;
+
+ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + 
"_backward_grad_grad",
+ {ograds[0], nnvm::NodeEntry{dydx}}, nullptr, ));
+ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + 
"_backward_grad_grad_inp",
+ {ograds[0], nnvm::NodeEntry{d2ydx2}}, nullptr, 
));
+return ret;
+});
 
 // abs
 MXNET_OPERATOR_REGISTER_UNARY_WITH_RSP_CSR(abs, cpu, mshadow_op::abs)
@@ -736,7 +767,26 @@ The storage type of ``abs`` output depends upon the input 
storage type:
 )code" ADD_FILELINE)
 .set_attr("FGradient", ElemwiseGradUseIn{"_backward_abs"});
 
-MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_backward_abs, 
unary_bwd);
+MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_backward_abs, 
unary_bwd)
+.set_attr("FGradient",
+[](const nnvm::NodePtr& n, const std::vector& ograds) {
+  // ograds[0]: dL/dxgrad
+  // inputs[0]: dL/dy
+  // inputs[1]: x
+  // f(x) -> abs(x)
+  // f'(x) = 1 if x > 0 else -1
+  // f''(x) = 0
+  auto dydx = MakeNode("elemwise_div", n->attrs.name + "_dydx",
+   {nnvm::NodeEntry{n}, n->inputs[0]}, nullptr, );
+
+  std::vector ret;
+  ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + 
"_backward_grad_grad",
+{ograds[0], nnvm::NodeEntry(dydx)}, nullptr, 
));
+  ret.emplace_back(MakeNode("zeros_like", n->attrs.name + 
"_backward_grad_grad_in",
+{n->inputs[1]}, nullptr, ));
+  return ret;
+});
+
 
 // sign
 MXNET_OPERATOR_REGISTER_UNARY_WITH_RSP_CSR(sign, cpu, mshadow_op::sign)
diff --git a/tests/python/unittest/test_higher_order_grad.py 
b/tests/python/unittest/test_higher_order_grad.py
index ad14c50..0f07d01 100644
--- a/tests/python/unittest/test_higher_order_grad.py
+++ b/tests/python/unittest/test_higher_order_grad.py
@@ -107,6 +107,33 @@ def test_log10():
 
 
 @with_seed()
+def test_reciprocal():
+def reciprocal(x):
+return nd.reciprocal(x)
+
+def grad_grad_op(x):
+return 2 / x**3
+
+for dim in range(1, 5):
+shape = rand_shape_nd(dim)
+array = random_arrays(shape)
+check_second_order_unary(array, reciprocal, grad_grad_op)
+
+
+@with_seed()
+def test_abs():
+def abs(x):
+return nd.abs(x)
+
+def grad_grad_op(x):
+return nd.zeros_like(x)
+
+for dim in range(1, 5):
+shape = rand_shape_nd(dim)
+array = random_arrays(shape)
+check_second_order_unary(array, abs, grad_grad_op)
+
+
 def test_sigmoid():
 def sigmoid(x):
 return nd.sigmoid(x)



[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #15413: [MXNET-978] Higher Order Gradient Support `reciprocal`, `abs`.

2019-07-07 Thread GitBox
apeforest commented on a change in pull request #15413: [MXNET-978] Higher 
Order Gradient Support `reciprocal`, `abs`.
URL: https://github.com/apache/incubator-mxnet/pull/15413#discussion_r300885216
 
 

 ##
 File path: src/operator/tensor/elemwise_unary_op_basic.cc
 ##
 @@ -717,7 +717,38 @@ Example::
 
 MXNET_OPERATOR_REGISTER_BINARY(_backward_reciprocal)
 .set_attr("FCompute",
-  ElemwiseBinaryOp::Compute >);
+  ElemwiseBinaryOp::Compute >)
+.set_attr("FGradient",
+  [](const nnvm::NodePtr& n, const std::vector& ograds) {
+// ograds[0]: dL/dxgrad
+// inputs[0]: dL/dy
+// inputs[1]: x
+// f(x) = y = 1/x
+// f'(x) = -1/x^2
+// f''(x) = 2/x^3 = -2 * (f'(x) * f(x))
+
+const std::unordered_map args = {{"scalar", 
"-2.0"}};
+
+auto dydx_mul_dldy = nnvm::NodeEntry{n};  // f'(x) * head_grads
+auto dydx = MakeNode("elemwise_div", n->attrs.name + "_dydx",
+ {dydx_mul_dldy, n->inputs[0]}, nullptr, );
+auto fx = MakeNode("reciprocal", n->attrs.name + "_fx",
+   {n->inputs[1]}, nullptr, );
+
+auto d2ydx2_mid = MakeNode("elemwise_mul", n->attrs.name + "_d2ydx2_mid",
+   {dydx_mul_dldy, nnvm::NodeEntry{fx}}, nullptr, 
);
+
+auto d2ydx2 = MakeNode("_mul_scalar", n->attrs.name + "_d2ydx2",
+   {nnvm::NodeEntry{d2ydx2_mid}}, , );
+
+std::vector ret;
+
+ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + 
"_backward_grad_grad",
 
 Review comment:
   This term will be useful when you calculate the third order (and above) 
gradient.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #15480: [MXNET-978] Higher Order Gradient Support `elemwise_mul`, `elemwise_add`.

2019-07-07 Thread GitBox
apeforest commented on a change in pull request #15480: [MXNET-978] Higher 
Order Gradient Support `elemwise_mul`, `elemwise_add`.
URL: https://github.com/apache/incubator-mxnet/pull/15480#discussion_r300884866
 
 

 ##
 File path: src/operator/tensor/elemwise_binary_op_basic.cc
 ##
 @@ -274,7 +298,43 @@ NNVM_REGISTER_OP(_backward_div)
 .set_attr("FCompute", ElemwiseBinaryOp::BackwardUseIn<
   cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>)
 .set_attr("FComputeEx", ElemwiseBinaryOp::BackwardUseInEx<
-  cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>);
+  cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>)
+.set_attr("FGradient",
+  [](const nnvm::NodePtr& n, const std::vector& ograds) {
+// z = x / y
+// NodeEntry{n, 0, 0} : z_grad * (1/y)
+// NodeEntry{n, 1, 0} : z_grad * (-x/y^2)
+// n->inputs[0] : z_grad
+// n->inputs[1] : x
+// n->inputs[2] : y
+// ograds[0] : head_grads
+// f(x, y) = x / y
+// dx = z_grad * (1/y), dy = z_grad * (-x/y^2)
+// d2x = 0, d2y = dy * (-2/x) = (2x/y^3), dz_grad = (dx + dy) / (z_grad)
+auto dx = nnvm::NodeEntry{n, 0, 0};
+auto dy = nnvm::NodeEntry{n, 1, 0};
+auto dx_add_dy = MakeNode("elemwise_add", n->attrs.name + "_x_add_y",
+  {dx, dy}, nullptr, );
+auto dz_grad = MakeNode("elemwise_div", n->attrs.name + "_x_add_y",
+ {nnvm::NodeEntry{dx_add_dy}, 
n->inputs[0]}, nullptr, );
+
+const std::unordered_map two = {{"scalar", 
"-2.0"}};
+auto y = n->inputs[2];
+auto r_y = MakeNode("reciprocal", n->attrs.name + "_r_y", {y}, nullptr, 
);
+auto neg_two_r_y = MakeNode("_mul_scalar", n->attrs.name + "_neg_two_r_y",
+{nnvm::NodeEntry{r_y}}, , );
+auto d2y = MakeNode("elemwise_mul", n->attrs.name + "_d2y",
+{nnvm::NodeEntry{neg_two_r_y}, dy}, , );
+
+std::vector ret;
+ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + 
"_backward_grad_grad",
+  {ograds[0], nnvm::NodeEntry{dz_grad}}, nullptr, 
));
+ret.emplace_back(MakeNode("zeros_like", n->attrs.name + 
"_backward_grad_grad_x",
 
 Review comment:
   Same here. I don't think the second order gradient is zero. e.g. z = x / 
(x^2 + 1) 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #15480: [MXNET-978] Higher Order Gradient Support `elemwise_mul`, `elemwise_add`.

2019-07-07 Thread GitBox
apeforest commented on a change in pull request #15480: [MXNET-978] Higher 
Order Gradient Support `elemwise_mul`, `elemwise_add`.
URL: https://github.com/apache/incubator-mxnet/pull/15480#discussion_r300884866
 
 

 ##
 File path: src/operator/tensor/elemwise_binary_op_basic.cc
 ##
 @@ -274,7 +298,43 @@ NNVM_REGISTER_OP(_backward_div)
 .set_attr("FCompute", ElemwiseBinaryOp::BackwardUseIn<
   cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>)
 .set_attr("FComputeEx", ElemwiseBinaryOp::BackwardUseInEx<
-  cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>);
+  cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>)
+.set_attr("FGradient",
+  [](const nnvm::NodePtr& n, const std::vector& ograds) {
+// z = x / y
+// NodeEntry{n, 0, 0} : z_grad * (1/y)
+// NodeEntry{n, 1, 0} : z_grad * (-x/y^2)
+// n->inputs[0] : z_grad
+// n->inputs[1] : x
+// n->inputs[2] : y
+// ograds[0] : head_grads
+// f(x, y) = x / y
+// dx = z_grad * (1/y), dy = z_grad * (-x/y^2)
+// d2x = 0, d2y = dy * (-2/x) = (2x/y^3), dz_grad = (dx + dy) / (z_grad)
+auto dx = nnvm::NodeEntry{n, 0, 0};
+auto dy = nnvm::NodeEntry{n, 1, 0};
+auto dx_add_dy = MakeNode("elemwise_add", n->attrs.name + "_x_add_y",
+  {dx, dy}, nullptr, );
+auto dz_grad = MakeNode("elemwise_div", n->attrs.name + "_x_add_y",
+ {nnvm::NodeEntry{dx_add_dy}, 
n->inputs[0]}, nullptr, );
+
+const std::unordered_map two = {{"scalar", 
"-2.0"}};
+auto y = n->inputs[2];
+auto r_y = MakeNode("reciprocal", n->attrs.name + "_r_y", {y}, nullptr, 
);
+auto neg_two_r_y = MakeNode("_mul_scalar", n->attrs.name + "_neg_two_r_y",
+{nnvm::NodeEntry{r_y}}, , );
+auto d2y = MakeNode("elemwise_mul", n->attrs.name + "_d2y",
+{nnvm::NodeEntry{neg_two_r_y}, dy}, , );
+
+std::vector ret;
+ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + 
"_backward_grad_grad",
+  {ograds[0], nnvm::NodeEntry{dz_grad}}, nullptr, 
));
+ret.emplace_back(MakeNode("zeros_like", n->attrs.name + 
"_backward_grad_grad_x",
 
 Review comment:
   Same here. Please test z = x / (x^2 + 1) 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #15480: [MXNET-978] Higher Order Gradient Support `elemwise_mul`, `elemwise_add`.

2019-07-07 Thread GitBox
apeforest commented on a change in pull request #15480: [MXNET-978] Higher 
Order Gradient Support `elemwise_mul`, `elemwise_add`.
URL: https://github.com/apache/incubator-mxnet/pull/15480#discussion_r300884763
 
 

 ##
 File path: src/operator/tensor/elemwise_binary_op_basic.cc
 ##
 @@ -274,7 +298,43 @@ NNVM_REGISTER_OP(_backward_div)
 .set_attr("FCompute", ElemwiseBinaryOp::BackwardUseIn<
   cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>)
 .set_attr("FComputeEx", ElemwiseBinaryOp::BackwardUseInEx<
-  cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>);
+  cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>)
+.set_attr("FGradient",
+  [](const nnvm::NodePtr& n, const std::vector& ograds) {
+// z = x / y
+// NodeEntry{n, 0, 0} : z_grad * (1/y)
+// NodeEntry{n, 1, 0} : z_grad * (-x/y^2)
+// n->inputs[0] : z_grad
+// n->inputs[1] : x
+// n->inputs[2] : y
+// ograds[0] : head_grads
+// f(x, y) = x / y
+// dx = z_grad * (1/y), dy = z_grad * (-x/y^2)
+// d2x = 0, d2y = dy * (-2/x) = (2x/y^3), dz_grad = (dx + dy) / (z_grad)
+auto dx = nnvm::NodeEntry{n, 0, 0};
+auto dy = nnvm::NodeEntry{n, 1, 0};
+auto dx_add_dy = MakeNode("elemwise_add", n->attrs.name + "_x_add_y",
+  {dx, dy}, nullptr, );
+auto dz_grad = MakeNode("elemwise_div", n->attrs.name + "_x_add_y",
+ {nnvm::NodeEntry{dx_add_dy}, 
n->inputs[0]}, nullptr, );
+
+const std::unordered_map two = {{"scalar", 
"-2.0"}};
+auto y = n->inputs[2];
+auto r_y = MakeNode("reciprocal", n->attrs.name + "_r_y", {y}, nullptr, 
);
+auto neg_two_r_y = MakeNode("_mul_scalar", n->attrs.name + "_neg_two_r_y",
+{nnvm::NodeEntry{r_y}}, , );
+auto d2y = MakeNode("elemwise_mul", n->attrs.name + "_d2y",
+{nnvm::NodeEntry{neg_two_r_y}, dy}, , );
+
+std::vector ret;
+ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + 
"_backward_grad_grad",
+  {ograds[0], nnvm::NodeEntry{dz_grad}}, nullptr, 
));
+ret.emplace_back(MakeNode("zeros_like", n->attrs.name + 
"_backward_grad_grad_x",
 
 Review comment:
   Really?  Have you tested y = x * x?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #15480: [MXNET-978] Higher Order Gradient Support `elemwise_mul`, `elemwise_add`.

2019-07-07 Thread GitBox
apeforest commented on a change in pull request #15480: [MXNET-978] Higher 
Order Gradient Support `elemwise_mul`, `elemwise_add`.
URL: https://github.com/apache/incubator-mxnet/pull/15480#discussion_r300884782
 
 

 ##
 File path: src/operator/tensor/elemwise_binary_op_basic.cc
 ##
 @@ -251,7 +251,31 @@ NNVM_REGISTER_OP(_backward_mul)
 .set_attr("FCompute", ElemwiseBinaryOp::BackwardUseIn<
   cpu, mshadow_op::right, mshadow_op::left>)
 .set_attr("FComputeEx", ElemwiseBinaryOp::BackwardUseInEx<
-  cpu, mshadow_op::right, mshadow_op::left>);
+  cpu, mshadow_op::right, mshadow_op::left>)
+.set_attr("FGradient",
+  [](const nnvm::NodePtr& n, const std::vector& ograds) {
+// z = x * y
+// NodeEntry{n, 0, 0} : z_grad * y
+// NodeEntry{n, 1, 0} : z_grad * x
+// n->inputs[0] : z_grad
+// n->inputs[1] : x
+// n->inputs[2] : y
+// ograds[0] : head_grads
+// f(x, y) = x * y
+// dx = z_grad * y, dy = z_grad * x
+// d2x = 0, d2y = 0, dz_grad = dx + dy = y + x
+auto dz_grad = MakeNode("elemwise_add", n->attrs.name + "_y_add_x",
+{n->inputs[2], n->inputs[1]}, nullptr, );
+
+std::vector ret;
+ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + 
"_backward_grad_grad",
+  {ograds[0], nnvm::NodeEntry{dz_grad}}, nullptr, 
));
+ret.emplace_back(MakeNode("zeros_like", n->attrs.name + 
"_backward_grad_grad_x",
 
 Review comment:
   Really? Have you tested z = x * x?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #15480: [MXNET-978] Higher Order Gradient Support `elemwise_mul`, `elemwise_add`.

2019-07-07 Thread GitBox
apeforest commented on a change in pull request #15480: [MXNET-978] Higher 
Order Gradient Support `elemwise_mul`, `elemwise_add`.
URL: https://github.com/apache/incubator-mxnet/pull/15480#discussion_r300884763
 
 

 ##
 File path: src/operator/tensor/elemwise_binary_op_basic.cc
 ##
 @@ -274,7 +298,43 @@ NNVM_REGISTER_OP(_backward_div)
 .set_attr("FCompute", ElemwiseBinaryOp::BackwardUseIn<
   cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>)
 .set_attr("FComputeEx", ElemwiseBinaryOp::BackwardUseInEx<
-  cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>);
+  cpu, mshadow_op::div_grad, mshadow_op::div_rgrad>)
+.set_attr("FGradient",
+  [](const nnvm::NodePtr& n, const std::vector& ograds) {
+// z = x / y
+// NodeEntry{n, 0, 0} : z_grad * (1/y)
+// NodeEntry{n, 1, 0} : z_grad * (-x/y^2)
+// n->inputs[0] : z_grad
+// n->inputs[1] : x
+// n->inputs[2] : y
+// ograds[0] : head_grads
+// f(x, y) = x / y
+// dx = z_grad * (1/y), dy = z_grad * (-x/y^2)
+// d2x = 0, d2y = dy * (-2/x) = (2x/y^3), dz_grad = (dx + dy) / (z_grad)
+auto dx = nnvm::NodeEntry{n, 0, 0};
+auto dy = nnvm::NodeEntry{n, 1, 0};
+auto dx_add_dy = MakeNode("elemwise_add", n->attrs.name + "_x_add_y",
+  {dx, dy}, nullptr, );
+auto dz_grad = MakeNode("elemwise_div", n->attrs.name + "_x_add_y",
+ {nnvm::NodeEntry{dx_add_dy}, 
n->inputs[0]}, nullptr, );
+
+const std::unordered_map two = {{"scalar", 
"-2.0"}};
+auto y = n->inputs[2];
+auto r_y = MakeNode("reciprocal", n->attrs.name + "_r_y", {y}, nullptr, 
);
+auto neg_two_r_y = MakeNode("_mul_scalar", n->attrs.name + "_neg_two_r_y",
+{nnvm::NodeEntry{r_y}}, , );
+auto d2y = MakeNode("elemwise_mul", n->attrs.name + "_d2y",
+{nnvm::NodeEntry{neg_two_r_y}, dy}, , );
+
+std::vector ret;
+ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + 
"_backward_grad_grad",
+  {ograds[0], nnvm::NodeEntry{dz_grad}}, nullptr, 
));
+ret.emplace_back(MakeNode("zeros_like", n->attrs.name + 
"_backward_grad_grad_x",
 
 Review comment:
   Really?  Have you tested y = x * x?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet-site] branch asf-site updated: Bump the publish timestamp.

2019-07-07 Thread marcoabreu
This is an automated email from the ASF dual-hosted git repository.

marcoabreu pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new baab1c2  Bump the publish timestamp.
baab1c2 is described below

commit baab1c2dff3ce145574784ca7753af9d7deba2f1
Author: mxnet-ci 
AuthorDate: Sun Jul 7 20:47:04 2019 +

Bump the publish timestamp.
---
 date.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/date.txt b/date.txt
new file mode 100644
index 000..7704830
--- /dev/null
+++ b/date.txt
@@ -0,0 +1 @@
+Sun Jul  7 20:47:04 UTC 2019



[incubator-mxnet-site] branch asf-site updated: Bump the publish timestamp.

2019-07-07 Thread marcoabreu
This is an automated email from the ASF dual-hosted git repository.

marcoabreu pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new e2f5f67  Bump the publish timestamp.
e2f5f67 is described below

commit e2f5f67c54f0073e1fcab4bcde7ea512b181e834
Author: mxnet-ci 
AuthorDate: Sun Jul 7 19:20:21 2019 +

Bump the publish timestamp.
---
 date.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/date.txt b/date.txt
new file mode 100644
index 000..b0dfc17
--- /dev/null
+++ b/date.txt
@@ -0,0 +1 @@
+Sun Jul  7 19:20:21 UTC 2019



[GitHub] [incubator-mxnet] nopattern opened a new issue #15482: mx2onnx error about batchnorm

2019-07-07 Thread GitBox
nopattern opened a new issue #15482: mx2onnx error  about  batchnorm
URL: https://github.com/apache/incubator-mxnet/issues/15482
 
 
   
   ## Description
I use mx2onnx onnx_mxnet.export_model to transfer mxnet symbol to onnx. 
But the moving_mean_var param of Batchnorm is not in the params. So the 
   
   ## Environment info (Required)
   
   ```
   --Python Info--
   Version  : 3.6.8
   Compiler : GCC 5.4.0 20160609
   Build: ('default', 'May  7 2019 14:58:50')
   Arch : ('64bit', 'ELF')
   Pip Info---
   Version  : 19.1.1
   Directory: /usr/local/lib/python3.6/dist-packages/pip
   --MXNet Info---
   Version  : 1.5.0
   Directory: /home/deep/workssd/mxnet/incubator-mxnet/python/mxnet
   Hashtag not found. Not installed from pre-built package.
   --System Info--
   Platform : Linux-4.4.0-148-generic-x86_64-with-Ubuntu-16.04-xenial
   system   : Linux
   node : MS-7817
   release  : 4.4.0-148-generic
   version  : #174-Ubuntu SMP Tue May 7 12:20:14 UTC 2019
   --Hardware Info--
   machine  : x86_64
   processor: x86_64
   Architecture:  x86_64
   CPU op-mode(s):32-bit, 64-bit
   Byte Order:Little Endian
   CPU(s):4
   On-line CPU(s) list:   0-3
   Thread(s) per core:1
   Core(s) per socket:4
   Socket(s): 1
   NUMA node(s):  1
   Vendor ID: GenuineIntel
   CPU family:6
   Model: 60
   Model name:Intel(R) Core(TM) i5-4590 CPU @ 3.30GHz
   Stepping:  3
   CPU MHz:   3657.070
   CPU max MHz:   3700.
   CPU min MHz:   800.
   BogoMIPS:  6600.45
   Virtualization:VT-x
   L1d cache: 32K
   L1i cache: 32K
   L2 cache:  256K
   L3 cache:  6144K
   NUMA node0 CPU(s): 0-3
   
   
   ```
   
   Package used (Python/R/Scala/Julia):
    (I'm using Python)
   
   ## Build info (Required if built from source)
   
   Compiler (gcc):
   
   MXNet commit hash:
   (da4b2a82511df)
   
   Build config:
   # Licensed to the Apache Software Foundation (ASF) under one
   # or more contributor license agreements.  See the NOTICE file
   # distributed with this work for additional information
   # regarding copyright ownership.  The ASF licenses this file
   # to you under the Apache License, Version 2.0 (the
   # "License"); you may not use this file except in compliance
   # with the License.  You may obtain a copy of the License at
   #
   #   http://www.apache.org/licenses/LICENSE-2.0
   #
   # Unless required by applicable law or agreed to in writing,
   # software distributed under the License is distributed on an
   # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
   # KIND, either express or implied.  See the License for the
   # specific language governing permissions and limitations
   # under the License.
   
   
#---
   #  Template configuration for compiling mxnet
   #
   #  If you want to change the configuration, please use the following
   #  steps. Assume you are on the root directory of mxnet. First copy the this
   #  file so that any local changes will be ignored by git
   #
   #  $ cp make/config.mk .
   #
   #  Next modify the according entries, and then compile by
   #
   #  $ make
   #
   #  or build in parallel with 8 threads
   #
   #  $ make -j8
   
#---
   
   #-
   # choice of compiler
   #
   
   ifndef CC
   export CC = gcc
   endif
   ifndef CXX
   export CXX = g++
   endif
   ifndef NVCC
   export NVCC = nvcc
   endif
   
   # whether compile with options for MXNet developer
   DEV = 0
   
   # whether compile with debug
   DEBUG = 0
   
   # whether to turn on segfault signal handler to log the stack trace
   USE_SIGNAL_HANDLER =
   
   # the additional link flags you want to add
   ADD_LDFLAGS =
   
   # the additional compile flags you want to add
   ADD_CFLAGS =
   
   #-
   # matrix computation libraries for CPU/GPU
   #-
   
   # whether use CUDA during compile
   USE_CUDA = 1
   
   # add the path to CUDA library to link and compile flag
   # if you have already add them to environment variable, leave it as NONE
   USE_CUDA_PATH = /usr/local/cuda
   #USE_CUDA_PATH = NONE
   
   # whether to enable CUDA runtime compilation
   ENABLE_CUDA_RTC = 1
   
   # whether use CuDNN R3 library
   USE_CUDNN = 1
   
   # whether to use NVTX when profiling
   USE_NVTX = 0
   
   #whether to use NCCL library
   USE_NCCL = 0
   #add the path to NCCL library
   USE_NCCL_PATH = NONE
   
   # whether use opencv during compilation
 

[GitHub] [incubator-mxnet] kshitij12345 opened a new pull request #15481: fix comment

2019-07-07 Thread GitBox
kshitij12345 opened a new pull request #15481: fix comment
URL: https://github.com/apache/incubator-mxnet/pull/15481
 
 
   fix comment.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet-site] branch asf-site updated: Bump the publish timestamp.

2019-07-07 Thread marcoabreu
This is an automated email from the ASF dual-hosted git repository.

marcoabreu pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new a255f3a  Bump the publish timestamp.
a255f3a is described below

commit a255f3a8a07a580b904b0ef4be92386cf79841be
Author: mxnet-ci 
AuthorDate: Sun Jul 7 13:14:43 2019 +

Bump the publish timestamp.
---
 date.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/date.txt b/date.txt
new file mode 100644
index 000..ef9812e
--- /dev/null
+++ b/date.txt
@@ -0,0 +1 @@
+Sun Jul  7 13:14:43 UTC 2019



[GitHub] [incubator-mxnet] pengzhao-intel commented on issue #15453: add inference mode for op perf benchmark

2019-07-07 Thread GitBox
pengzhao-intel commented on issue #15453: add inference mode for op perf 
benchmark
URL: https://github.com/apache/incubator-mxnet/pull/15453#issuecomment-508990900
 
 
   Thanks for the improvement. 
   Could you paste the output of inference OP for both CPU and GPU?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] pengzhao-intel merged pull request #15451: Had a few PRs merged. Hope to become an official contributor.

2019-07-07 Thread GitBox
pengzhao-intel merged pull request #15451: Had a few PRs merged. Hope to become 
an official contributor.
URL: https://github.com/apache/incubator-mxnet/pull/15451
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet] branch master updated: Had a few PRs merged. Hope to become an official contributor and potentially a committer. (#15451)

2019-07-07 Thread patriczhao
This is an automated email from the ASF dual-hosted git repository.

patriczhao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
 new a6ed12f  Had a few PRs merged. Hope to become an official contributor 
and potentially a committer. (#15451)
a6ed12f is described below

commit a6ed12fe4f49ffceb12b25244a16fde42cfa11b6
Author: Disi A 
AuthorDate: Sun Jul 7 07:07:20 2019 -0400

Had a few PRs merged. Hope to become an official contributor and 
potentially a committer. (#15451)
---
 CONTRIBUTORS.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 43d9880..750a22a 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -244,6 +244,7 @@ List of Contributors
 * [Shoubhik Bhattacharya](https://github.com/shoubhik)
 * [Rohit Srivastava](https://github.com/access2rohit)
 * [Caner Turkmen](https://github.com/canerturkmen)
+* [Disi A](https://github.com/adis300)
 
 Label Bot
 -



[GitHub] [incubator-mxnet] pengzhao-intel commented on issue #15451: Had a few PRs merged. Hope to become an official contributor.

2019-07-07 Thread GitBox
pengzhao-intel commented on issue #15451: Had a few PRs merged. Hope to become 
an official contributor.
URL: https://github.com/apache/incubator-mxnet/pull/15451#issuecomment-508990763
 
 
   Thanks for your contribution. Merging now.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] pengzhao-intel edited a comment on issue #15473: mac mxnet cpu compile error

2019-07-07 Thread GitBox
pengzhao-intel edited a comment on issue #15473: mac mxnet cpu compile error
URL: 
https://github.com/apache/incubator-mxnet/issues/15473#issuecomment-508990407
 
 
   Could you update the issue with a simple summary about what error you 
encountered and how to reproduce it?
   
   Feel free to delete un-related info in the template.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] pengzhao-intel commented on issue #15473: mac mxnet cpu compile error

2019-07-07 Thread GitBox
pengzhao-intel commented on issue #15473: mac mxnet cpu compile error
URL: 
https://github.com/apache/incubator-mxnet/issues/15473#issuecomment-508990407
 
 
   Could you update the description with a simple summary about what error you 
encountered and how to reproduce it?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] kshitij12345 opened a new pull request #15480: Develop/add higher order/elemwise {mul/div}[MXNET-978] Higher Order Gradient Support `elemwise_mul`, `elemwise_add`.

2019-07-07 Thread GitBox
kshitij12345 opened a new pull request #15480: Develop/add higher 
order/elemwise {mul/div}[MXNET-978] Higher Order Gradient Support 
`elemwise_mul`, `elemwise_add`.
URL: https://github.com/apache/incubator-mxnet/pull/15480
 
 
   ## Description ##
   PR intends to add support for higher order gradient for `elemwise_mul`, 
`elemwise_div`.
   
   ## Checklist ##
   ### Essentials ###
   Please feel free to remove inapplicable items for your PR.
   - [x] The PR title starts with [MXNET-$JIRA_ID], where $JIRA_ID refers to 
the relevant [JIRA-978 issue](https://issues.apache.org/jira/browse/MXNET-978) 
created (except PRs with tiny changes)
   - [x] Changes are complete (i.e. I finished coding on this PR)
   - [x] All changes have test coverage:
   - Unit tests are added for small changes to verify correctness (e.g. adding 
a new operator)
   - Nightly tests are added for complicated/long-running ones (e.g. changing 
distributed kvstore)
   - Build tests will be added for build configuration changes (e.g. adding a 
new build option with NCCL)
   - [x] Code is well-documented: 
   - For user-facing API changes, API doc string has been updated. 
   - For new C++ functions in header files, their functionalities and arguments 
are documented. 
   - For new examples, README.md is added to explain the what the example does, 
the source of the dataset, expected performance on test set and reference to 
the original paper if applicable
   - Check the API doc at 
http://mxnet-ci-doc.s3-accelerate.dualstack.amazonaws.com/PR-$PR_ID/$BUILD_ID/index.html
   - [x] To the my best knowledge, examples are either not affected by this 
change, or have been fixed to be compatible with this change
   
   ### Changes ###
   - [x] higher order gradient for a `elemwise_mul`, `elemwise_div`.
   - [x] unit test for the same.
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] KhurramPirov opened a new issue #15479: Clear optimizer state in batch.end.callback

2019-07-07 Thread GitBox
KhurramPirov opened a new issue #15479: Clear optimizer state in 
batch.end.callback
URL: https://github.com/apache/incubator-mxnet/issues/15479
 
 
   ## Description
   I want to clear SGD optimizer state during some steps of training.
   Package used:
   Python 3.7
   ## What have you tried to solve it?
    1. I tried to delete the optimizer and then create a new one inside the 
batch-end callback function
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet-site] branch asf-site updated: Bump the publish timestamp.

2019-07-07 Thread marcoabreu
This is an automated email from the ASF dual-hosted git repository.

marcoabreu pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new d99e4c3  Bump the publish timestamp.
d99e4c3 is described below

commit d99e4c375060f3bf64ae30007fe32b9aedf7fc49
Author: mxnet-ci 
AuthorDate: Sun Jul 7 07:22:20 2019 +

Bump the publish timestamp.
---
 date.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/date.txt b/date.txt
new file mode 100644
index 000..270e9d7
--- /dev/null
+++ b/date.txt
@@ -0,0 +1 @@
+Sun Jul  7 07:22:19 UTC 2019



[GitHub] [incubator-mxnet] gyshi closed issue #15478: compute in GPU , astype('int32') overflow problem

2019-07-07 Thread GitBox
gyshi closed issue #15478: compute in GPU ,  astype('int32') overflow problem
URL: https://github.com/apache/incubator-mxnet/issues/15478
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] nopattern edited a comment on issue #14589: convert from mxnet to onnx failed, with Unrecognized attribute: spatial for operator BatchNormalization

2019-07-07 Thread GitBox
nopattern edited a comment on issue #14589: convert from mxnet to onnx failed, 
with Unrecognized attribute: spatial for operator BatchNormalization
URL: 
https://github.com/apache/incubator-mxnet/issues/14589#issuecomment-508975646
 
 
    I have converted mxnet (1.5.0) to onnx (1.5.0); the error is:
   
 `INFO:root:Converting idx: 3, op: null, name: 
first-3x3-conv-batchnorm_gamma
   INFO:root:Converting idx: 4, op: null, name: first-3x3-conv-batchnorm_beta
   INFO:root:Converting idx: 5, op: null, name: 
first-3x3-conv-batchnorm_moving_mean
   Traceback (most recent call last):
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1741, in 
   main()
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1735, in main
   globals = debugger.run(setup['file'], None, None, is_module)
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1135, in run
   pydev_imports.execfile(file, globals, locals)  # execute the script
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/_pydev_imps/_pydev_execfile.py",
 line 18, in execfile
   exec(compile(contents+"\n", file, 'exec'), glob, loc)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 484, 
in 
   tune_and_evaluate(tuning_option)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 436, 
in tune_and_evaluate
   net, params, input_shape, _ = get_network(network, batch_size=1)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 93, 
in get_network
   return get_network_lpr_mb2(name,batch_size)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 143, 
in get_network_lpr_mb2
   test_onnx()
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 135, 
in test_onnx
   converted_model_path = onnx_mxnet.export_model(mx_sym, args, 
[input_shape], np.float32, onnx_file, True)
 File 
"/home/deep/workssd/mxnet/incubator-mxnet/python/mxnet/contrib/onnx/mx2onnx/export_model.py",
 line 87, in export_model
   verbose=verbose)
 File 
"/home/deep/workssd/mxnet/incubator-mxnet/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py",
 line 234, in create_onnx_graph_proto
   in_shape=in_shape[graph_input_idx],
   IndexError: list index out of range
   Error in sys.excepthook:
   Traceback (most recent call last):
 File "/usr/lib/python3/dist-packages/apport_python_hook.py", line 63, in 
apport_excepthook
   from apport.fileutils import likely_packaged, get_recent_crashes
 File "/usr/lib/python3/dist-packages/apport/__init__.py", line 5, in 

   from apport.report import Report
 File "/usr/lib/python3/dist-packages/apport/report.py", line 30, in 

   import apport.fileutils
 File "/usr/lib/python3/dist-packages/apport/fileutils.py", line 23, in 

   from apport.packaging_impl import impl as packaging
 File "/usr/lib/python3/dist-packages/apport/packaging_impl.py", line 23, 
in 
   import apt
 File "/usr/lib/python3/dist-packages/apt/__init__.py", line 23, in 
   import apt_pkg
   ModuleNotFoundError: No module named 'apt_pkg'
   
   Original exception was:
   Traceback (most recent call last):
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1741, in 
   main()
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1735, in main
   globals = debugger.run(setup['file'], None, None, is_module)
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1135, in run
   pydev_imports.execfile(file, globals, locals)  # execute the script
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/_pydev_imps/_pydev_execfile.py",
 line 18, in execfile
   exec(compile(contents+"\n", file, 'exec'), glob, loc)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 484, 
in 
   tune_and_evaluate(tuning_option)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 436, 
in tune_and_evaluate
   net, params, input_shape, _ = get_network(network, batch_size=1)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 93, 
in get_network
   return get_network_lpr_mb2(name,batch_size)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 143, 
in get_network_lpr_mb2
   test_onnx()
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 135, 
in test_onnx
   converted_model_path = onnx_mxnet.export_model(mx_sym, args, 
[input_shape], np.float32, onnx_file, True)
 File 
"/home/deep/workssd/mxnet/incubator-mxnet/python/mxnet/contrib/onnx/mx2onnx/export_model.py",
 line 87, in export_model
   verbose=verbose)
 File 

[GitHub] [incubator-mxnet] nopattern commented on issue #14589: convert from mxnet to onnx failed, with Unrecognized attribute: spatial for operator BatchNormalization

2019-07-07 Thread GitBox
nopattern commented on issue #14589: convert from mxnet to onnx failed, with 
Unrecognized attribute: spatial for operator BatchNormalization
URL: 
https://github.com/apache/incubator-mxnet/issues/14589#issuecomment-508975646
 
 
    I have converted mxnet (1.5.0) to onnx (1.5.0); the error is:
 `INFO:root:Converting idx: 3, op: null, name: 
first-3x3-conv-batchnorm_gamma
   INFO:root:Converting idx: 4, op: null, name: first-3x3-conv-batchnorm_beta
   INFO:root:Converting idx: 5, op: null, name: 
first-3x3-conv-batchnorm_moving_mean
   Traceback (most recent call last):
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1741, in 
   main()
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1735, in main
   globals = debugger.run(setup['file'], None, None, is_module)
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1135, in run
   pydev_imports.execfile(file, globals, locals)  # execute the script
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/_pydev_imps/_pydev_execfile.py",
 line 18, in execfile
   exec(compile(contents+"\n", file, 'exec'), glob, loc)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 484, 
in 
   tune_and_evaluate(tuning_option)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 436, 
in tune_and_evaluate
   net, params, input_shape, _ = get_network(network, batch_size=1)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 93, 
in get_network
   return get_network_lpr_mb2(name,batch_size)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 143, 
in get_network_lpr_mb2
   test_onnx()
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 135, 
in test_onnx
   converted_model_path = onnx_mxnet.export_model(mx_sym, args, 
[input_shape], np.float32, onnx_file, True)
 File 
"/home/deep/workssd/mxnet/incubator-mxnet/python/mxnet/contrib/onnx/mx2onnx/export_model.py",
 line 87, in export_model
   verbose=verbose)
 File 
"/home/deep/workssd/mxnet/incubator-mxnet/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py",
 line 234, in create_onnx_graph_proto
   in_shape=in_shape[graph_input_idx],
   IndexError: list index out of range
   Error in sys.excepthook:
   Traceback (most recent call last):
 File "/usr/lib/python3/dist-packages/apport_python_hook.py", line 63, in 
apport_excepthook
   from apport.fileutils import likely_packaged, get_recent_crashes
 File "/usr/lib/python3/dist-packages/apport/__init__.py", line 5, in 

   from apport.report import Report
 File "/usr/lib/python3/dist-packages/apport/report.py", line 30, in 

   import apport.fileutils
 File "/usr/lib/python3/dist-packages/apport/fileutils.py", line 23, in 

   from apport.packaging_impl import impl as packaging
 File "/usr/lib/python3/dist-packages/apport/packaging_impl.py", line 23, 
in 
   import apt
 File "/usr/lib/python3/dist-packages/apt/__init__.py", line 23, in 
   import apt_pkg
   ModuleNotFoundError: No module named 'apt_pkg'
   
   Original exception was:
   Traceback (most recent call last):
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1741, in 
   main()
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1735, in main
   globals = debugger.run(setup['file'], None, None, is_module)
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/pydevd.py", 
line 1135, in run
   pydev_imports.execfile(file, globals, locals)  # execute the script
 File 
"/home/deep/workssd/work/pycharm-community-2019.1.1/helpers/pydev/_pydev_imps/_pydev_execfile.py",
 line 18, in execfile
   exec(compile(contents+"\n", file, 'exec'), glob, loc)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 484, 
in 
   tune_and_evaluate(tuning_option)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 436, 
in tune_and_evaluate
   net, params, input_shape, _ = get_network(network, batch_size=1)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 93, 
in get_network
   return get_network_lpr_mb2(name,batch_size)
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 143, 
in get_network_lpr_mb2
   test_onnx()
 File "/home/deep/workssd/arm/tvm_app/tune_relay_mobile_gpu.py", line 135, 
in test_onnx
   converted_model_path = onnx_mxnet.export_model(mx_sym, args, 
[input_shape], np.float32, onnx_file, True)
 File 
"/home/deep/workssd/mxnet/incubator-mxnet/python/mxnet/contrib/onnx/mx2onnx/export_model.py",
 line 87, in export_model
   verbose=verbose)
 File