leezu commented on a change in pull request #18622:
URL: https://github.com/apache/incubator-mxnet/pull/18622#discussion_r457718521



##########
File path: src/common/cuda/rtc.cc
##########
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "mxnet/base.h"
+
+#if MXNET_USE_CUDA
+
+#include <nvrtc.h>
+
+#include <mutex>
+#include <string>
+#include <fstream>
+#include <unordered_map>
+#include <vector>
+
+#include "rtc.h"
+#include "rtc/half-inl.h"
+#include "rtc/util-inl.h"
+#include "rtc/forward_functions-inl.h"
+#include "rtc/backward_functions-inl.h"
+#include "rtc/vectorization-inl.h"
+#include "rtc/special_functions-inl.h"
+#include "rtc/reducer-inl.h"
+#include "utils.h"
+
+
+namespace mxnet {
+namespace common {
+namespace cuda {
+namespace rtc {
+
+std::mutex lock;
+
+namespace util {
+
+std::string to_string(OpReqType req) {
+  switch (req) {
+    case kNullOp:
+      return "OpReqType::kNullOp";
+    case kWriteTo:
+    case kWriteInplace:
+      return "OpReqType::kWriteTo";
+    case kAddTo:
+      return "OpReqType::kAddTo";
+  }
+  LOG(FATAL) << "Unrecognized req.";
+  return "";
+}
+
+}  // namespace util
+
+namespace {
+
+// Obtain compilation log from the program.
+std::string GetCompileLog(nvrtcProgram program) {
+  size_t log_size_including_null;
+  NVRTC_CALL(nvrtcGetProgramLogSize(program, &log_size_including_null));
+  // For most std::string implementations, this is probably 1 char bigger than needed.  OK though.

Review comment:
       Why is it needed?
   
   "The elements of a basic_string are stored contiguously, that is, for a basic_string s, &*(s.begin() + n) == &*s.begin() + n for any n in [0, s.size()), or, equivalently, a pointer to s[0] can be passed to functions that expect a pointer to the first element of a null-terminated (since C++11) CharT[] array."
   
   https://en.cppreference.com/w/cpp/string/basic_string

##########
File path: src/common/cuda/rtc.cc
##########
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "mxnet/base.h"
+
+#if MXNET_USE_CUDA
+
+#include <nvrtc.h>
+
+#include <mutex>
+#include <string>
+#include <fstream>
+#include <unordered_map>
+#include <vector>
+
+#include "rtc.h"
+#include "rtc/half-inl.h"
+#include "rtc/util-inl.h"
+#include "rtc/forward_functions-inl.h"
+#include "rtc/backward_functions-inl.h"
+#include "rtc/vectorization-inl.h"
+#include "rtc/special_functions-inl.h"
+#include "rtc/reducer-inl.h"
+#include "utils.h"
+
+
+namespace mxnet {
+namespace common {
+namespace cuda {
+namespace rtc {
+
+std::mutex lock;
+
+namespace util {
+
+std::string to_string(OpReqType req) {
+  switch (req) {
+    case kNullOp:
+      return "OpReqType::kNullOp";
+    case kWriteTo:
+    case kWriteInplace:
+      return "OpReqType::kWriteTo";
+    case kAddTo:
+      return "OpReqType::kAddTo";
+  }
+  LOG(FATAL) << "Unrecognized req.";
+  return "";
+}
+
+}  // namespace util
+
+namespace {
+
+// Obtain compilation log from the program.
+std::string GetCompileLog(nvrtcProgram program) {
+  size_t log_size_including_null;
+  NVRTC_CALL(nvrtcGetProgramLogSize(program, &log_size_including_null));
+  // For most std::string implementations, this is probably 1 char bigger than needed.  OK though.
+  std::string log(log_size_including_null, '\0');
+  NVRTC_CALL(nvrtcGetProgramLog(program, &log[0]));
+  // Make sure the string reflects the true size (so minus the null terminator).
+  log.resize(log_size_including_null - 1);
+  return log;
+}
+
+// Obtain compilation result (ptx assembly) from the program.
+std::string GetPtx(nvrtcProgram program) {
+  size_t ptx_size_including_null;
+  NVRTC_CALL(nvrtcGetPTXSize(program, &ptx_size_including_null));
+  // For most std::string implementations, this is probably 1 char bigger than needed.  OK though.
+  std::string ptx(ptx_size_including_null, '\0');
+  NVRTC_CALL(nvrtcGetPTX(program, &ptx[0]));
+  // Make sure the string reflects the true size (so minus the null terminator).
+  ptx.resize(ptx_size_including_null - 1);
+  return ptx;
+}
+
+}  // namespace
+
+CUfunction get_function(const std::string &parameters,
+                        const std::string &kernel_name,
+                        const std::string &code,
+                        int dev_id) {
+  constexpr int CACHESIZE_WARN_THRESHOLD = 10000;
+  std::lock_guard<std::mutex> l(lock);
+  // Local class for value type of compile cache
+  struct KernelInfo {
+    std::string mangled_name;
+    std::string ptx;
+    std::vector<CUfunction> functions;
+  };
+  // Maps from the kernel name and parameters to the ptx and jit-compiled CUfunctions.
+  using KernelCache = std::unordered_map<std::string, KernelInfo>;
+  // Per-gpu-architecture compiled kernel cache with jit-compiled function for each device context
+  static std::unordered_map<int32_t, KernelCache> compiled_kernels;
+  int sm_arch = SMArch(dev_id);
+  // make null map as needed
+  KernelCache& compiled_kernels_this_arch = compiled_kernels[sm_arch];
+  // make KernelInfo as needed
+  KernelInfo& kinfo = compiled_kernels_this_arch[parameters + kernel_name];
+  if (kinfo.ptx.size() == 0) {
+    // It's the first time we've seen this kernel, so we need to generate the ptx and mangled_name.
+    static std::string common_header =
+        std::string(fp16_support_string) + "\n" +
+        type_support_string + "\n" +
+        util_string + "\n" +
+        special_functions_definitions + '\n' +
+        function_definitions_util + "\n" +
+        function_definitions_binary + "\n" +
+        function_definitions_unary + "\n" +
+        backward_function_definitions + "\n" +
+        vectorization_support_string + "\n" +
+        reducer + "\n";
+    std::string code_with_header = common_header + parameters + code;
+    // If verbose mode, output kernel source, though not including the common header
+    if (dmlc::GetEnv("MXNET_RTC_VERBOSE", false)) {
+      LOG(INFO) << "\n" << std::string(80, '-') << "\n" << (parameters + code);
+    }
+    if (compiled_kernels_this_arch.size() == CACHESIZE_WARN_THRESHOLD + 1 &&
+        dmlc::GetEnv("MXNET_RTC_SIZE_WARNING", true)) {
+      LOG(WARNING) << "The number of different compiled kernels exceeds "
+                   << CACHESIZE_WARN_THRESHOLD
+                   << ".  Set MXNET_RTC_SIZE_WARNING=0 to quiet this warning.";
+    }
+    nvrtcProgram program;
+    NVRTC_CALL(nvrtcCreateProgram(&program,                                   // prog
+                                  &code_with_header[0],                       // buffer
+                                  (kernel_name + "_kernel.cu").c_str(),       // name
+                                  0,                                          // num headers
+                                  nullptr,                                    // headers
+                                  nullptr));                                  // include names
+
+    std::string gpu_arch_arg = "--gpu-architecture=compute_" + std::to_string(sm_arch);
+    const char *opts[] = {gpu_arch_arg.c_str(),
+#if NDEBUG == 0
+                          "-G",
+#endif
+                          "--std=c++11"};

Review comment:
       `c++14`?

##########
File path: src/operator/numpy/np_elemwise_broadcast_op_extended.cc
##########
@@ -201,16 +201,12 @@ MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_copysign_scalar)
 
 MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_rcopysign_scalar)
 .set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::rcopysign>)
-.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_rcopysign_scalar"});
+.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes);

Review comment:
       Why change the grad nodes?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Reply via email to