This is an automated email from the ASF dual-hosted git repository.

zhasheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new c2714c0  MXNET_FORCE_ADDTAKEGRAD to disable AddTakeGradLargeBatchCaller (#11316)
c2714c0 is described below

commit c2714c01460b30919965ced3b95473c7bcb2cbd7
Author: Leonard Lausen <leon...@lausen.nl>
AuthorDate: Mon Jun 18 18:26:51 2018 +0000

    MXNET_FORCE_ADDTAKEGRAD to disable AddTakeGradLargeBatchCaller (#11316)
    
    * MXNET_FORCE_ADDTAKEGRAD to disable AddTakeGradLargeBatchCaller
    
    If MXNET_FORCE_ADDTAKEGRAD is set, EmbeddingOpBackward will always use
    AddTakeGrad independently of gradient input and output shape
    
    * Read MXNET_FORCE_ADDTAKEGRAD to a static variable
---
 src/operator/tensor/indexing_op.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/operator/tensor/indexing_op.h b/src/operator/tensor/indexing_op.h
index 8738196..5f9e59d 100644
--- a/src/operator/tensor/indexing_op.h
+++ b/src/operator/tensor/indexing_op.h
@@ -598,7 +598,11 @@ void EmbeddingOpBackward(const nnvm::NodeAttrs& attrs,
         uint64_t shape_out_prod =
           static_cast<uint64_t>(grad_out.shape_[0])*
           static_cast<uint64_t>(grad_out.shape_[1]);
-        if (shape_out_prod < (uint64_t)16384 && shape_in_prod < (uint64_t)16384) {
+
+        static bool default_addtakegrad =
+            dmlc::GetEnv("MXNET_FORCE_ADDTAKEGRAD", false);
+        if (!default_addtakegrad || (shape_out_prod < (uint64_t)16384 &&
+                                     shape_in_prod < (uint64_t)16384)) {
           AddTakeGrad(grad_in, data, grad_out);
         } else {
           AddTakeGradLargeBatchCaller(ctx, grad_in, data, grad_out);

-- 
To stop receiving notification emails like this one, please contact
zhash...@apache.org.

Reply via email to