[GitHub] cjolivier01 commented on a change in pull request #9672: CMake CUDA fixes + NCCL
cjolivier01 commented on a change in pull request #9672: CMake CUDA fixes + NCCL URL: https://github.com/apache/incubator-mxnet/pull/9672#discussion_r165660835 ## File path: CMakeLists.txt ## @@ -168,12 +167,43 @@ endif() include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include) include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src) -if(FIRST_CUDA) +if(USE_CUDA) + find_package(CUDA REQUIRED) + add_definitions(-DMSHADOW_USE_CUDA=1) + if(FIRST_CUDA AND (NOT USE_OLDCMAKECUDA)) +if(CUDA_TOOLSET STREQUAL "") + set(CUDA_TOOLSET "${CUDA_VERSION_STRING}") Review comment: the old code (not written by me) defaulted to Cuda 8 no matter what unless CUDA_TOOLKIT var is set. my change makes it so that it defaults to whatever version find_package(CUDA) finds, which is the default behavior in older versions (non FIRST_CUDA versions). this is still overridable by CUDA_TOOLKIT var. if you specify a version that way and CMake can't find it, you get an error (CMake > 9.0 understands cuda compiling). alternately, you can specify the cuda directory which will be picked up by find_package(CUDA). see CMake's FindCUDA.cmake for details. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] pengzhao-intel commented on a change in pull request #9552: [REQUEST FOR REVIEW | DO NOT MERGE] Model Quantization with Calibration
pengzhao-intel commented on a change in pull request #9552: [REQUEST FOR REVIEW | DO NOT MERGE] Model Quantization with Calibration URL: https://github.com/apache/incubator-mxnet/pull/9552#discussion_r165646442 ## File path: src/operator/quantization/quantized_conv.cc ## @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2017 by Contributors + * \file quantized_conv.cc + * \brief + * \author Ziheng Jiang, Jun Wu +*/ +#include "../nn/convolution-inl.h" + +namespace mxnet { +namespace op { + +// TODO(junwu): Reuse the InferShape function of convolution op after +// this pr is merged: https://github.com/apache/incubator-mxnet/pull/8302 +bool QuantizedConvShape(const nnvm::NodeAttrs& attrs, +std::vector* in_shape, +std::vector* out_shape) { + using namespace mshadow; + const ConvolutionParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(param.num_group, 1U) << "quantized_conv only supports num_group=1 for now"; + CHECK_EQ(in_shape->size(), param.no_bias? 
6U : 9U); + CHECK_EQ(out_shape->size(), 3U); + if (param.layout.has_value()) { +CHECK_EQ(param.layout.value(), mshadow::kNCHW) << "quantized_conv only supports NCHW for now"; + } + CHECK_EQ(param.kernel.ndim(), 2U) << "quantized_conv only supports 2D convolution for now"; + CHECK(param.dilate.ndim() == 0U || param.dilate.Size() == 1U) +<< "quantized_conv only supports dilation=1 for all dimensions"; + const TShape& dshape = in_shape->at(0); + CHECK_EQ(dshape.ndim(), 4U); + if (dshape.ndim() == 0U) return false; + + const int N = 0, H = 2, W = 3, C = 1; + CHECK_EQ(dshape[C] % 4, 0U) +<< "for 8bit cudnn conv, the number of channel must be multiple of 4"; + CHECK_EQ(param.num_filter % 4, 0U) +<< "for 8bit cudnn conv, the number of channel must be multiple of 4"; + + TShape wshape{0, 0, 0, 0}; + wshape[N] = param.num_filter; + wshape[H] = param.kernel[0]; + wshape[W] = param.kernel[1]; + wshape[C] = dshape[C]; + SHAPE_ASSIGN_CHECK(*in_shape, 1, wshape); + const int start = param.no_bias? 2 : 3; + const int end = param.no_bias? 6 : 9; + for (int i = start; i < end; ++i) { +SHAPE_ASSIGN_CHECK(*in_shape, i, TShape{1}); + } + if (!param.no_bias) { +SHAPE_ASSIGN_CHECK(*in_shape, 2, Shape1(param.num_filter)); + } + + auto AddPad = [](index_t dsize, index_t pad) { return dsize + 2 * pad; }; + TShape oshape{1, 1, 1, 1}; + oshape[N] = dshape[N]; + oshape[C] = wshape[N]; + oshape[H] = (AddPad(dshape[H], param.pad[0]) - wshape[H]) / param.stride[0] + 1; + oshape[W] = (AddPad(dshape[W], param.pad[1]) - wshape[W]) / param.stride[1] + 1; + + SHAPE_ASSIGN_CHECK(*out_shape, 0, oshape); + SHAPE_ASSIGN_CHECK(*out_shape, 1, TShape({1})); + SHAPE_ASSIGN_CHECK(*out_shape, 2, TShape({1})); + return true; +} + +bool QuantizedConvType(const nnvm::NodeAttrs& attrs, + std::vector *in_type, + std::vector *out_type) { + const ConvolutionParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(in_type->size(), param.no_bias? 
6U : 9U); + CHECK_EQ(out_type->size(), 3U); + TYPE_ASSIGN_CHECK(*in_type, 0, mshadow::kInt8); + TYPE_ASSIGN_CHECK(*in_type, 1, mshadow::kInt8); + if (!param.no_bias) { +TYPE_ASSIGN_CHECK(*in_type, 2, mshadow::kInt8); + } + + const size_t start = param.no_bias? 2 : 3; + const size_t end = param.no_bias? 6 : 9; + for (size_t i = start; i < end; ++i) { +TYPE_ASSIGN_CHECK(*in_type, i, mshadow::kFloat32); + } + + TYPE_ASSIGN_CHECK(*out_type, 0, mshadow::kInt32); + TYPE_ASSIGN_CHECK(*out_type, 1, mshadow::kFloat32); + TYPE_ASSIGN_CHECK(*out_type, 2, mshadow::kFloat32); + return true; +} + +NNVM_REGISTER_OP(_contrib_quantized_conv) +.describe(R"code(Convolution operator for input, weight and bias data type of int8, +and accumulates in type int32 for the output. For each argument, two more arguments of type +float32 must be provided representing the thresholds of quantizing argument from data +type float32 to int8. The final outputs contain the convolution result in int32, and min +and max thresholds representing the thresholds for quantizing the float32 output into int32. +
[GitHub] pengzhao-intel commented on issue #9552: [REQUEST FOR REVIEW | DO NOT MERGE] Model Quantization with Calibration
pengzhao-intel commented on issue #9552: [REQUEST FOR REVIEW | DO NOT MERGE] Model Quantization with Calibration URL: https://github.com/apache/incubator-mxnet/pull/9552#issuecomment-362588513 @reminisce and all, This is an awesome PR! Our team (@wentingj @jinhuang415) is also working on an INT8 solution based on the [MKL-DNN library](https://github.com/intel/mkl-dnn). And we plan to contribute our code with this PR. I have updated a slide to introduce an overview of our solution and its status. I think we can align our solutions from a high level first and then go into technical details :) [Intel INT8 Solution for MXNet.pptx](https://github.com/apache/incubator-mxnet/files/1689472/Intel.INT8.Solution.for.MXNet.pptx) Feel free to let us know your questions, comments and suggestions. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KellenSunderland commented on a change in pull request #9671: Exp backoff for downloads.
KellenSunderland commented on a change in pull request #9671: Exp backoff for downloads. URL: https://github.com/apache/incubator-mxnet/pull/9671#discussion_r165642736 ## File path: python/mxnet/gluon/utils.py ## @@ -61,13 +52,13 @@ def split_data(data, num_slice, batch_axis=0, even_split=True): size = data.shape[batch_axis] if size < num_slice: raise ValueError( -"Too many slices for data with shape %s. Arguments are " \ -"num_slice=%d and batch_axis=%d."%(str(data.shape), num_slice, batch_axis)) +"Too many slices for data with shape %s. Arguments are " Review comment: I think triplequotes would be the equiv of ```python "Too many slices for data with shape %s. Arguments are \n" \ "num_slice=%d and batch_axis=%d."%(str(data.shape), num_slice, batch_axis)) ``` but I'll try a few options. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KellenSunderland commented on a change in pull request #9671: Exp backoff for downloads.
KellenSunderland commented on a change in pull request #9671: Exp backoff for downloads. URL: https://github.com/apache/incubator-mxnet/pull/9671#discussion_r165642736 ## File path: python/mxnet/gluon/utils.py ## @@ -61,13 +52,13 @@ def split_data(data, num_slice, batch_axis=0, even_split=True): size = data.shape[batch_axis] if size < num_slice: raise ValueError( -"Too many slices for data with shape %s. Arguments are " \ -"num_slice=%d and batch_axis=%d."%(str(data.shape), num_slice, batch_axis)) +"Too many slices for data with shape %s. Arguments are " Review comment: I think triplequotes would be the equiv of ``` "Too many slices for data with shape %s. Arguments are \n" \ "num_slice=%d and batch_axis=%d."%(str(data.shape), num_slice, batch_axis)) ``` but I'll try a few options. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] benqua commented on issue #8245: Use argmax instead of argmax_channel in Accuracy to keep dimention
benqua commented on issue #8245: Use argmax instead of argmax_channel in Accuracy to keep dimention URL: https://github.com/apache/incubator-mxnet/pull/8245#issuecomment-362581380 @yzhliu Can you check and merge if ok? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] larroy commented on a change in pull request #9671: Exp backoff for downloads.
larroy commented on a change in pull request #9671: Exp backoff for downloads. URL: https://github.com/apache/incubator-mxnet/pull/9671#discussion_r165637624 ## File path: python/mxnet/gluon/utils.py ## @@ -61,13 +52,13 @@ def split_data(data, num_slice, batch_axis=0, even_split=True): size = data.shape[batch_axis] if size < num_slice: raise ValueError( -"Too many slices for data with shape %s. Arguments are " \ -"num_slice=%d and batch_axis=%d."%(str(data.shape), num_slice, batch_axis)) +"Too many slices for data with shape %s. Arguments are " Review comment: Isn't triple quotes useful in this case? """blahs dah""" This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KellenSunderland commented on issue #9612: CUDNN_STATUS_SUCCESS (4 vs. 0) cuDNN: CUDNN_STATUS_INTERNAL_ERROR on jetson TX2
KellenSunderland commented on issue #9612: CUDNN_STATUS_SUCCESS (4 vs. 0) cuDNN: CUDNN_STATUS_INTERNAL_ERROR on jetson TX2 URL: https://github.com/apache/incubator-mxnet/issues/9612#issuecomment-36259 Thanks again for iterating on this. The guide is updated with a build that should include CUDNN. The only change is to the Dockerfile. Sorry I missed it in the first file. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KineticCookie closed issue #9361: infer_shape error for 'resnet-152'
KineticCookie closed issue #9361: infer_shape error for 'resnet-152' URL: https://github.com/apache/incubator-mxnet/issues/9361 This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KellenSunderland commented on issue #9612: CUDNN_STATUS_SUCCESS (4 vs. 0) cuDNN: CUDNN_STATUS_INTERNAL_ERROR on jetson TX2
KellenSunderland commented on issue #9612: CUDNN_STATUS_SUCCESS (4 vs. 0) cuDNN: CUDNN_STATUS_INTERNAL_ERROR on jetson TX2 URL: https://github.com/apache/incubator-mxnet/issues/9612#issuecomment-36259 Thanks again for iterating on this. The guide is updated with a build that should include CUDNN. Sorry I missed it in the first file. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KineticCookie commented on issue #9361: infer_shape error for 'resnet-152'
KineticCookie commented on issue #9361: infer_shape error for 'resnet-152' URL: https://github.com/apache/incubator-mxnet/issues/9361#issuecomment-362577135 @kevinthesun thanks for the detailed explanation. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] szha commented on issue #8529: What is the functionality of OrderMutation
szha commented on issue #8529: What is the functionality of OrderMutation URL: https://github.com/apache/incubator-mxnet/issues/8529#issuecomment-362573098 @apache/mxnet-committers: This issue has been inactive for the past 90 days. It has no label and needs triage. For general "how-to" questions, our [user forum](https://discuss.mxnet.io/) (and [Chinese version](https://discuss.gluon.ai/)) is a good place to get help. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KellenSunderland commented on issue #9552: [REQUEST FOR REVIEW | DO NOT MERGE] Model Quantization with Calibration
KellenSunderland commented on issue #9552: [REQUEST FOR REVIEW | DO NOT MERGE] Model Quantization with Calibration URL: https://github.com/apache/incubator-mxnet/pull/9552#issuecomment-362572156 Hey @reminisce, looking forward to this one on the edge team (if you can't tell). If you're going to test on the p3 instance I recommend cherry-picking this commit: #9684. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] viebboy commented on issue #7642: Max norm regularization
viebboy commented on issue #7642: Max norm regularization URL: https://github.com/apache/incubator-mxnet/issues/7642#issuecomment-362570173 Have you figured out how to implement max norm regularization yet? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KellenSunderland opened a new pull request #9684: Enable dp4a for the CI
KellenSunderland opened a new pull request #9684: Enable dp4a for the CI URL: https://github.com/apache/incubator-mxnet/pull/9684 ## Description ## Turn on sm61 so that when we add p3 support to the CI env we'll be able to execute the dp4a instruction. This should help test features like quantization in CI. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] dma100180 commented on issue #9673: MX.LSTM time series
dma100180 commented on issue #9673: MX.LSTM time series URL: https://github.com/apache/incubator-mxnet/issues/9673#issuecomment-362560897 Thank you very much ... I've been stuck on this for several weeks; thanks again for letting me know This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] dma100180 closed issue #9673: MX.LSTM time series
dma100180 closed issue #9673: MX.LSTM time series URL: https://github.com/apache/incubator-mxnet/issues/9673 This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] SBMahapatra commented on issue #6023: pip install error: No matching distribution found for mxnet-cu80
SBMahapatra commented on issue #6023: pip install error: No matching distribution found for mxnet-cu80 URL: https://github.com/apache/incubator-mxnet/issues/6023#issuecomment-362561464 I am getting the same installation error for kera on windows 10. Could not find a version that satisfies the requirement kera (from versions: ) No matching distribution found for kera. pip version is (C:\ProgramData\Anaconda3) C:\Users\SB>pip --version pip 9.0.1 from C:\ProgramData\Anaconda3\lib\site-packages (python 3.6) This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KellenSunderland commented on a change in pull request #9681: Better Exception Handling for Operators
KellenSunderland commented on a change in pull request #9681: Better Exception Handling for Operators URL: https://github.com/apache/incubator-mxnet/pull/9681#discussion_r165600888 ## File path: tests/python/unittest/test_exc_handling.py ## @@ -0,0 +1,112 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, Review comment: Nit: spacing This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KellenSunderland commented on issue #9681: Better Exception Handling for Operators
KellenSunderland commented on issue #9681: Better Exception Handling for Operators URL: https://github.com/apache/incubator-mxnet/pull/9681#issuecomment-362538618 This is going to be a big improvement for new users experimenting with the library. Thanks for the great work @anirudh2290. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] KellenSunderland commented on issue #9612: CUDNN_STATUS_SUCCESS (4 vs. 0) cuDNN: CUDNN_STATUS_INTERNAL_ERROR on jetson TX2
KellenSunderland commented on issue #9612: CUDNN_STATUS_SUCCESS (4 vs. 0) cuDNN: CUDNN_STATUS_INTERNAL_ERROR on jetson TX2 URL: https://github.com/apache/incubator-mxnet/issues/9612#issuecomment-362535807 Indeed that seems to be a bug, I should be building with cudnn enabled. Let me try and update it. I've run into this compile error a few times before, so hopefully it won't take long to fix. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] yanhn commented on issue #9612: CUDNN_STATUS_SUCCESS (4 vs. 0) cuDNN: CUDNN_STATUS_INTERNAL_ERROR on jetson TX2
yanhn commented on issue #9612: CUDNN_STATUS_SUCCESS (4 vs. 0) cuDNN: CUDNN_STATUS_INTERNAL_ERROR on jetson TX2 URL: https://github.com/apache/incubator-mxnet/issues/9612#issuecomment-362532985 Hi @KellenSunderland ~ I managed to install mxnet on TX2 using docker image build on the host under as you suggested. There's no error about CUDNN_STATUS_SUCCESS (4 vs. 0), but the speed is still the same. I cannot feel the existence of cudnn. And I think maybe that's because the line 15 in `Dockerfile.build.master.jetson`, I still built mxnet without cudnn. When I set line 15 to `ENV BUILD_OPTS "USE_OPENCV=0 USE_BLAS=openblas USE_SSE=0 USE_CUDA=1 USE_CUDNN=1 ENABLE_CUDA_RTC=0 USE_NCCL=0 USE_CUDA_PATH=/usr/local/cuda/"` and rerun the docker script, I got error like this: compilation terminated. In file included from /work/mxnet/mshadow/mshadow/tensor.h:16:0, from include/mxnet/./base.h:32, from include/mxnet/operator_util.h:43, from src/operator/contrib/./bounding_box-inl.h:27, from src/operator/contrib/bounding_box.cc:27: /work/mxnet/mshadow/mshadow/./base.h:163:21: fatal error: cudnn.h: No such file or directory #include ^ compilation terminated. In file included from /work/mxnet/mshadow/mshadow/tensor.h:16:0, from include/mxnet/./base.h:32, from include/mxnet/operator.h:38, from src/operator/contrib/./deformable_psroi_pooling-inl.h:32, from src/operator/contrib/deformable_psroi_pooling.cc:27: /work/mxnet/mshadow/mshadow/./base.h:163:21: fatal error: cudnn.h: No such file or directory #include ^ compilation terminated. 
Makefile:393: recipe for target 'build/src/operator/contrib/krprod.o' failed make: *** [build/src/operator/contrib/krprod.o] Error 1 make: *** Waiting for unfinished jobs Makefile:393: recipe for target 'build/src/operator/contrib/bounding_box.o' failed make: *** [build/src/operator/contrib/bounding_box.o] Error 1 Makefile:393: recipe for target 'build/src/operator/nn/cudnn/cudnn_algoreg.o' failed make: *** [build/src/operator/nn/cudnn/cudnn_algoreg.o] Error 1 Makefile:393: recipe for target 'build/src/operator/contrib/dequantize.o' failed make: *** [build/src/operator/contrib/dequantize.o] Error 1 Makefile:393: recipe for target 'build/src/operator/contrib/fft.o' failed make: *** [build/src/operator/contrib/fft.o] Error 1 Makefile:393: recipe for target 'build/src/operator/contrib/multibox_target.o' failed make: *** [build/src/operator/contrib/multibox_target.o] Error 1 Makefile:393: recipe for target 'build/src/operator/nn/cudnn/cudnn_batch_norm.o' failed make: *** [build/src/operator/nn/cudnn/cudnn_batch_norm.o] Error 1 Makefile:393: recipe for target 'build/src/operator/contrib/count_sketch.o' failed make: *** [build/src/operator/contrib/count_sketch.o] Error 1 Makefile:393: recipe for target 'build/src/operator/contrib/multi_proposal.o' failed make: *** [build/src/operator/contrib/multi_proposal.o] Error 1 Makefile:393: recipe for target 'build/src/operator/contrib/ctc_loss.o' failed make: *** [build/src/operator/contrib/ctc_loss.o] Error 1 Makefile:393: recipe for target 'build/src/operator/contrib/multibox_prior.o' failed make: *** [build/src/operator/contrib/multibox_prior.o] Error 1 Makefile:393: recipe for target 'build/src/operator/contrib/deformable_psroi_pooling.o' failed make: *** [build/src/operator/contrib/deformable_psroi_pooling.o] Error 1 The command '/bin/sh -c make -j$(nproc) $BUILD_OPTS' returned a non-zero code: 2 So~ still trying~ This is an automated message from the Apache Git Service. 
To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] jjinking commented on issue #9350: Trouble building with mkl from source
jjinking commented on issue #9350: Trouble building with mkl from source URL: https://github.com/apache/incubator-mxnet/issues/9350#issuecomment-362498033 It's a 2g RAM CentOS7.3 docker container. I'm able to successfully compile v0.10.0 with this but I guess I gotta reduce memory usage during compile for v1.0.0 This worked though! ``` make USE_MKL2017=1 USE_BLAS=atlas USE_OPENCV=1 USE_MKL2017_EXPERIMENTAL=1 USE_GPERFTOOLS=1 ``` This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] jjinking commented on issue #9350: Trouble building with mkl from source
jjinking commented on issue #9350: Trouble building with mkl from source URL: https://github.com/apache/incubator-mxnet/issues/9350#issuecomment-362498033 It's a 2g RAM CentOS7.3 docker container. I'm able to successfully compile v0.10.0 with this but I guess I gotta reduce memory usage during compile for v1.0.0 The following worked! ``` make USE_MKL2017=1 USE_BLAS=atlas USE_OPENCV=1 USE_MKL2017_EXPERIMENTAL=1 USE_GPERFTOOLS=1 ``` This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] jjinking commented on issue #9350: Trouble building with mkl from source
jjinking commented on issue #9350: Trouble building with mkl from source URL: https://github.com/apache/incubator-mxnet/issues/9350#issuecomment-362498033 It's a 2g CentOS7.3 docker container. I'm able to successfully compile v0.10.0 with this but I guess I gotta reduce memory usage during compile for v1.0.0 This worked though! ``` make USE_MKL2017=1 USE_BLAS=atlas USE_OPENCV=1 USE_MKL2017_EXPERIMENTAL=1 USE_GPERFTOOLS=1 ``` This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] jjinking commented on issue #9350: Trouble building with mkl from source
jjinking commented on issue #9350: Trouble building with mkl from source URL: https://github.com/apache/incubator-mxnet/issues/9350#issuecomment-362498033 It's a 2g CentOS7.3 docker container. I'm able to successfully compile v0.10.0 with this but I guess not for v1.0.0 This worked though! ``` make USE_MKL2017=1 USE_BLAS=atlas USE_OPENCV=1 USE_MKL2017_EXPERIMENTAL=1 USE_GPERFTOOLS=1 ``` This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] willbadr closed issue #9682: eifjccfutnudujnjcleetcrftbceijufdrbbeerrfeit
willbadr closed issue #9682: eifjccfutnudujnjcleetcrftbceijufdrbbeerrfeit URL: https://github.com/apache/incubator-mxnet/issues/9682 This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] dongzhuoyao opened a new pull request #9683: gluon block.py typo
dongzhuoyao opened a new pull request #9683: gluon block.py typo URL: https://github.com/apache/incubator-mxnet/pull/9683 ## Description ## (Brief description on what this PR is about) ## Checklist ## ### Essentials ### - [Y] Passed code style checking (`make lint`) - [Y] Changes are complete (i.e. I finished coding on this PR) - [Y] All changes have test coverage: - Unit tests are added for small changes to verify correctness (e.g. adding a new operator) - Nightly tests are added for complicated/long-running ones (e.g. changing distributed kvstore) - Build tests will be added for build configuration changes (e.g. adding a new build option with NCCL) - [Y] Code is well-documented: - For user-facing API changes, API doc string has been updated. - For new C++ functions in header files, their functionalities and arguments are documented. - For new examples, README.md is added to explain the what the example does, the source of the dataset, expected performance on test set and reference to the original paper if applicable - [Y] To the my best knowledge, examples are either not affected by this change, or have been fixed to be compatible with this change ### Changes ### - [Y] Feature1, tests, (and when applicable, API doc) - [Y] Feature2, tests, (and when applicable, API doc) ## Comments ## - If this change is a backward incompatible change, why must this change be made. - Interesting edge cases to note here This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] willbadr opened a new issue #9682: eifjccfutnudujnjcleetcrftbceijufdrbbeerrfeit
willbadr opened a new issue #9682: eifjccfutnudujnjcleetcrftbceijufdrbbeerrfeit URL: https://github.com/apache/incubator-mxnet/issues/9682 Note: Providing complete information in the most concise form is the best way to get help. This issue template serves as the checklist for essential information to most of the technical issues and bug reports. For non-technical issues and feature requests, feel free to present the information in what you believe is the best form. For Q & A and discussion, please start a discussion thread at https://discuss.mxnet.io ## Description (Brief description of the problem in no more than 2 sentences.) ## Environment info (Required) ``` What to do: 1. Download the diagnosis script from https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/diagnose.py 2. Run the script using `python diagnose.py` and paste its output here. ``` Package used (Python/R/Scala/Julia): (I'm using ...) For Scala user, please provide: 1. Java version: (`java -version`) 2. Maven version: (`mvn -version`) 3. Scala runtime if applicable: (`scala -version`) For R user, please provide R `sessionInfo()`: ## Build info (Required if built from source) Compiler (gcc/clang/mingw/visual studio): MXNet commit hash: (Paste the output of `git rev-parse HEAD` here.) Build config: (Paste the content of config.mk, or the build command.) ## Error Message: (Paste the complete error message, including stack trace.) ## Minimum reproducible example (If you are using your own code, please provide a short script that reproduces the error. Otherwise, please provide link to the existing example.) ## Steps to reproduce (Paste the commands you ran that produced the error.) 1. 2. ## What have you tried to solve it? 1. 2. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. 
For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services