sandeep-krishnamurthy closed pull request #12027: [MXNET-768] Partially enable flaky test for norm operator
URL: https://github.com/apache/incubator-mxnet/pull/12027
This is a PR merged from a forked repository. As GitHub hides the original diff on merge, it is displayed below for the sake of provenance:

diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 90e85d123d5..53a726e7867 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -3107,7 +3107,7 @@ def l2norm(input_data, axis=0, keepdims=True):
     for order in [1, 2]:
         for dtype in [np.float16, np.float32, np.float64]:
             in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
-            in_data[abs(in_data) < epsilon] = epsilon
+            in_data[abs(in_data) < epsilon] = 2 * epsilon
             for i in range(in_data_dim):
                 norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, keepdims=True)
                 npy_out = l1norm(in_data, i) if order is 1 else l2norm(in_data, i)
@@ -3121,20 +3121,22 @@ def l2norm(input_data, axis=0, keepdims=True):
                                         atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
                 # Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
                 # # check gradient
-                # check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-2, atol=1e-3)
-                # if i < in_data_dim-1:
-                #     norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
-                #     npy_out = l1norm(in_data, (i, i+1)) if order is 1 else l2norm(in_data, (i, i+1))
-                #     npy_out_backward = np.sign(in_data) if order is 1 else in_data/npy_out
-                #     check_symbolic_forward(norm_sym, [in_data], [npy_out],
-                #                            rtol=1e-2 if dtype is np.float16 else 1e-5,
-                #                            atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
-                #     check_symbolic_backward(norm_sym, [in_data], [np.ones(npy_out.shape)],
-                #                             [npy_out_backward],
-                #                             rtol=1e-2 if dtype is np.float16 else 1e-5,
-                #                             atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
-                #     # check gradient
-                #     check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-2, atol=1e-3)
+                # if dtype is not np.float16:
+                #     check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-1, atol=1e-3)
+                if i < in_data_dim-1:
+                    norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
+                    npy_out = l1norm(in_data, (i, i+1)) if order is 1 else l2norm(in_data, (i, i+1))
+                    npy_out_backward = np.sign(in_data) if order is 1 else in_data/npy_out
+                    check_symbolic_forward(norm_sym, [in_data], [npy_out],
+                                           rtol=1e-2 if dtype is np.float16 else 1e-5,
+                                           atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
+                    check_symbolic_backward(norm_sym, [in_data], [np.ones(npy_out.shape)],
+                                            [npy_out_backward],
+                                            rtol=1e-2 if dtype is np.float16 else 1e-5,
+                                            atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
+                    # # check gradient
+                    # if dtype is not np.float16:
+                    #     check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-1, atol=1e-3)
 
 
 def test_layer_norm():
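The crux of the flooring change: the test clamps near-zero elements away from 0 because |x| has a kink at 0, and the (still disabled) numeric gradient check perturbs each element by numeric_eps, which the diff sets equal to the same epsilon used as the data floor. With a floor of exactly epsilon, the negative probe lands right on the kink, where np.sign(x), the analytic L1 gradient, is undefined; flooring at 2 * epsilon keeps every probed point strictly on one side of it. A minimal NumPy sketch of that boundary case (illustrative only, not code from the PR):

import numpy as np

# Assumption (matching the commented-out call in the diff): the numeric
# gradient checker perturbs each element by +/- numeric_eps == epsilon.
epsilon = 1e-3
probe = np.array([-epsilon, +epsilon])   # finite-difference offsets

old_floor = epsilon        # smallest magnitude the old test allowed
new_floor = 2 * epsilon    # smallest magnitude after this change

# Old floor: the -epsilon probe lands exactly on 0, the kink of |x|,
# where the analytic gradient np.sign(x) is undefined (sign(0) == 0).
print(old_floor + probe)           # [0.    0.002]
print(np.sign(old_floor + probe))  # [0. 1.]

# New floor: both probe points stay strictly positive, so |x| is
# smooth everywhere the finite difference evaluates it.
print(new_floor + probe)           # [0.001 0.003]
print(np.sign(new_floor + probe))  # [1. 1.]

With that floor in place, the PR re-enables the symbolic forward/backward checks over axis pairs; the numeric gradient check itself stays commented out pending https://github.com/apache/incubator-mxnet/issues/11509, which is why the test is only "partially" enabled.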