This is an automated email from the ASF dual-hosted git repository.

indhub pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new efef7b7  Temporarily disable 'test_row_sparse_pull' on GPU. (#8265)
efef7b7 is described below

commit efef7b7b4e24584e059a9c0ce995f02cc262cc08
Author: Indhu Bharathi <indhubhara...@gmail.com>
AuthorDate: Sat Oct 14 19:44:32 2017 -0700

    Temporarily disable 'test_row_sparse_pull' on GPU. (#8265)
    
    * Temporarily disable 'test_row_sparse_pull' on GPU. It can be re-enabled once https://github.com/apache/incubator-mxnet/issues/8262 is resolved.
    
    * Disable test_row_sparse_pull for GPU (not CPU)
    
    * Fix build
---
 tests/python/gpu/test_kvstore_gpu.py    |  2 +
 tests/python/gpu/test_operator_gpu.py   | 19 +++++-----
 tests/python/unittest/test_kvstore.py   |  1 +
 tests/python/unittest/test_optimizer.py | 67 +++++++++++++++++----------------
 4 files changed, 47 insertions(+), 42 deletions(-)

diff --git a/tests/python/gpu/test_kvstore_gpu.py b/tests/python/gpu/test_kvstore_gpu.py
index ffc0cc1..517d2e7 100644
--- a/tests/python/gpu/test_kvstore_gpu.py
+++ b/tests/python/gpu/test_kvstore_gpu.py
@@ -18,6 +18,7 @@
 # pylint: skip-file
 import mxnet as mx
 import numpy as np
+import unittest
 from mxnet.test_utils import assert_almost_equal, default_context
 
 shape = (4, 4)
@@ -35,6 +36,7 @@ def init_kv_with_str(stype='default'):
     return kv
 
 
+@unittest.skip("Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8262")
 def test_row_sparse_pull():
     kv = init_kv_with_str('row_sparse')
     kv.init('e', mx.nd.ones(shape).tostype('row_sparse'))
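
The skip is GPU-only because the decorated copy of the test lives under tests/python/gpu/, while the copy under tests/python/unittest/ keeps running on CPU. For illustration, here is a minimal sketch of the pattern used in this commit plus a hypothetical conditional variant; the skipIf condition and the placeholder test bodies below are not part of the commit:

    import unittest
    from mxnet.test_utils import default_context

    # Pattern used in this commit: unconditionally skip the GPU copy of the test.
    @unittest.skip("Tracked at https://github.com/apache/incubator-mxnet/issues/8262")
    def test_row_sparse_pull():
        pass  # placeholder; the real test pulls row_sparse values from a kvstore

    # Hypothetical alternative: keep a single copy of the test and skip it
    # only when the default context is a GPU.
    @unittest.skipIf(default_context().device_type == 'gpu',
                     "Fails intermittently on GPU; see issue #8262")
    def test_row_sparse_pull_conditional():
        pass
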
diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py
index 2f2c3a8..b1f43f3 100644
--- a/tests/python/gpu/test_operator_gpu.py
+++ b/tests/python/gpu/test_operator_gpu.py
@@ -21,6 +21,7 @@ import time
 import unittest
 import mxnet as mx
 import numpy as np
+import unittest
 from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
 from numpy.testing import assert_allclose
 
@@ -1358,16 +1359,16 @@ def test_rnn_layer():
 def test_sequence_reverse():
     check_sequence_reverse(mx.gpu(0))
 
+@unittest.skip("Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8211")
+def test_autograd_save_memory():
+    x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
+    x.attach_grad()
 
-#def test_autograd_save_memory():
-#    x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
-#    x.attach_grad()
-#
-#    with mx.autograd.record():
-#        for i in range(200):
-#            x = x + 1
-#            x.wait_to_read()
-#    x.backward()
+    with mx.autograd.record():
+        for i in range(200):
+            x = x + 1
+            x.wait_to_read()
+    x.backward()
 
 def test_gluon_ctc_consistency():
     loss = mx.gluon.loss.CTCLoss()
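
The re-enabled test_autograd_save_memory builds a 200-step chain of additions on a large array under mx.autograd.record() and then calls backward(). For readers unfamiliar with the API, a much smaller sketch of the same record/backward round trip (the shapes and values here are illustrative only):

    import mxnet as mx

    x = mx.nd.ones((2, 2))
    x.attach_grad()               # allocate a gradient buffer for x
    with mx.autograd.record():    # record operations for differentiation
        y = (x * x).sum()
    y.backward()                  # d(sum(x^2))/dx = 2x
    print(x.grad)                 # [[2. 2.] [2. 2.]]
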
diff --git a/tests/python/unittest/test_kvstore.py b/tests/python/unittest/test_kvstore.py
index 37d44e0..fc9e3be 100644
--- a/tests/python/unittest/test_kvstore.py
+++ b/tests/python/unittest/test_kvstore.py
@@ -18,6 +18,7 @@
 # pylint: skip-file
 import mxnet as mx
 import numpy as np
+import unittest
 from mxnet.test_utils import rand_ndarray, assert_almost_equal, assert_exception
 from mxnet.base import py_str, MXNetError
 
diff --git a/tests/python/unittest/test_optimizer.py b/tests/python/unittest/test_optimizer.py
index 62a1d14..8666b9e 100644
--- a/tests/python/unittest/test_optimizer.py
+++ b/tests/python/unittest/test_optimizer.py
@@ -18,6 +18,7 @@
 import numpy as np
 import mxnet as mx
 import mxnet.lr_scheduler as lr_scheduler
+import unittest
 from nose.tools import raises
 import math
 from mxnet.test_utils import *
@@ -532,39 +533,39 @@ class PyRMSProp(mx.optimizer.Optimizer):
         if self.clip_weights:
              mx.ndarray.clip(weight, -self.clip_weights, self.clip_weights, out=weight)
 
-#def test_rms():
-#    mx.random.seed(0)
-#    opt1 = PyRMSProp
-#    opt2 = mx.optimizer.RMSProp
-#    shape = (3, 4, 5)
-#    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
-#    cw_options = [{}, {'clip_weights': 0.01}]
-#    center_options = [{}, {'centered': False}, {'centered': True}]
-#    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
-#    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
-#    mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
-#    for dtype in [np.float16, np.float32]:
-#        for cw_option in cw_options:
-#            for cg_option in cg_options:
-#                for center_option in center_options:
-#                    for rg_option in rg_options:
-#                        for wd_option in wd_options:
-#                            for mp_option in mp_options:
-#                                kwarg = {}
-#                                kwarg.update(cw_option)
-#                                kwarg.update(cg_option)
-#                                kwarg.update(center_option)
-#                                kwarg.update(rg_option)
-#                                kwarg.update(wd_option)
-#                                kwarg.update(mp_option)
-#                                if (dtype == np.float16 and
-#                                        ('multi_precision' not in kwarg or
-#                                            not kwarg['multi_precision'])):
-#                                    continue
-#                                compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
-#                                if (default_context() == mx.cpu()):
-#                                    compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, g_stype='row_sparse')
-#
+@unittest.skip("Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8230")
+def test_rms():
+    mx.random.seed(0)
+    opt1 = PyRMSProp
+    opt2 = mx.optimizer.RMSProp
+    shape = (3, 4, 5)
+    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
+    cw_options = [{}, {'clip_weights': 0.01}]
+    center_options = [{}, {'centered': False}, {'centered': True}]
+    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
+    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
+    mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
+    for dtype in [np.float16, np.float32]:
+        for cw_option in cw_options:
+            for cg_option in cg_options:
+                for center_option in center_options:
+                    for rg_option in rg_options:
+                        for wd_option in wd_options:
+                            for mp_option in mp_options:
+                                kwarg = {}
+                                kwarg.update(cw_option)
+                                kwarg.update(cg_option)
+                                kwarg.update(center_option)
+                                kwarg.update(rg_option)
+                                kwarg.update(wd_option)
+                                kwarg.update(mp_option)
+                                if (dtype == np.float16 and
+                                        ('multi_precision' not in kwarg or
+                                            not kwarg['multi_precision'])):
+                                    continue
+                                compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
+                                if (default_context() == mx.cpu()):
+                                    compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, g_stype='row_sparse')
 
 class PyFtrl(mx.optimizer.Optimizer):
     """The Ftrl optimizer.

-- 
To stop receiving notification emails like this one, please contact
"comm...@mxnet.apache.org" <comm...@mxnet.apache.org>.
