[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-17 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r336322608
 
 

 ##
 File path: include/mxnet/lib_api.h
 ##
 @@ -35,6 +35,7 @@
 #include 
 #include 
 #include 
+#include 
 
 Review comment:
   Done


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-17 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r336319079
 
 

 ##
 File path: include/mxnet/lib_api.h
 ##
 @@ -63,7 +211,46 @@ struct MXTensor {
   MXTensor() : data_ptr(NULL) {}
 
  MXTensor(void *data_ptr, const std::vector<int64_t> &shape, MXDType dtype)
-  : data_ptr(data_ptr), shape(shape), dtype(dtype) {}
+  : data_ptr(data_ptr), shape(shape), dtype(dtype) {
+dltensor.data = data_ptr;
+dltensor.ctx.device_type = kDLCPU;
+dltensor.ctx.device_id = 0;
+dltensor.ndim = shape.size();
+dltensor.shape = const_cast<int64_t*>(shape.data());
+dltensor.strides = NULL;
+dltensor.byte_offset = 0;
+dltensor.dtype.lanes = 1;
+switch(dtype) {
+case kFloat32:
+  dltensor.dtype.code = kDLFloat;
+  dltensor.dtype.bits = 32;
+  break;
+case kFloat64:
+  dltensor.dtype.code = kDLFloat;
+  dltensor.dtype.bits = 64;
+  break;
+case kFloat16:
+  dltensor.dtype.code = kDLFloat;
+  dltensor.dtype.bits = 16;
+  break;
+case kUint8:
+  dltensor.dtype.code = kDLUInt;
+  dltensor.dtype.bits = 8;
+  break;
+case kInt32:
+  dltensor.dtype.code = kDLInt;
+  dltensor.dtype.bits = 32;
+  break;
+case kInt8:
+  dltensor.dtype.code = kDLInt;
+  dltensor.dtype.bits = 8;
+  break;
+case kInt64:
+  dltensor.dtype.code = kDLInt;
+  dltensor.dtype.bits = 64;
 
 Review comment:
   Done, changed to:
   
https://github.com/apache/incubator-mxnet/blob/5761891d039c49e34ab1da86ed802239d83e545d/include/mxnet/lib_api.h#L256-L258
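
   (Editor's note: the switch above is a pure MXDType-to-DLDataType mapping. As an illustration only, and not necessarily what the linked lines contain, it could be collapsed into a table indexed by the enum values kFloat32 = 0 through kInt64 = 6 quoted later in this thread:)
   ```
   // Editor's sketch (illustrative; not necessarily the linked change):
   // table indexed by MXDType enum values kFloat32 = 0 ... kInt64 = 6.
   static const DLDataType kMXToDLType[] = {
     {kDLFloat, 32, 1},  // kFloat32
     {kDLFloat, 64, 1},  // kFloat64
     {kDLFloat, 16, 1},  // kFloat16
     {kDLUInt,   8, 1},  // kUint8
     {kDLInt,   32, 1},  // kInt32
     {kDLInt,    8, 1},  // kInt8
     {kDLInt,   64, 1},  // kInt64
   };
   // then: dltensor.dtype = kMXToDLType[dtype];
   ```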


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-17 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r336304389
 
 

 ##
 File path: include/mxnet/lib_api.h
 ##
 @@ -63,7 +211,46 @@ struct MXTensor {
   MXTensor() : data_ptr(NULL) {}
 
  MXTensor(void *data_ptr, const std::vector<int64_t> &shape, MXDType dtype)
-  : data_ptr(data_ptr), shape(shape), dtype(dtype) {}
+  : data_ptr(data_ptr), shape(shape), dtype(dtype) {
+dltensor.data = data_ptr;
+dltensor.ctx.device_type = kDLCPU;
+dltensor.ctx.device_id = 0;
+dltensor.ndim = shape.size();
+dltensor.shape = const_cast<int64_t*>(shape.data());
+dltensor.strides = NULL;
+dltensor.byte_offset = 0;
+dltensor.dtype.lanes = 1;
+switch(dtype) {
+case kFloat32:
+  dltensor.dtype.code = kDLFloat;
+  dltensor.dtype.bits = 32;
+  break;
+case kFloat64:
+  dltensor.dtype.code = kDLFloat;
+  dltensor.dtype.bits = 64;
+  break;
+case kFloat16:
+  dltensor.dtype.code = kDLFloat;
+  dltensor.dtype.bits = 16;
+  break;
+case kUint8:
+  dltensor.dtype.code = kDLUInt;
+  dltensor.dtype.bits = 8;
+  break;
+case kInt32:
+  dltensor.dtype.code = kDLInt;
+  dltensor.dtype.bits = 32;
+  break;
+case kInt8:
+  dltensor.dtype.code = kDLInt;
+  dltensor.dtype.bits = 8;
+  break;
+case kInt64:
+  dltensor.dtype.code = kDLInt;
+  dltensor.dtype.bits = 64;
 
 Review comment:
   What if we just do this:
   ```
   default:
 dltensor.dtype.code = 0;
 dltensor.dtype.bits = 0;
 std::cout << "Error! Invalid dtype flag: " << dtype << std::endl;
   ```
   Since we're not including any glog we can't do "LOG(FATAL)" like we do here:
   
https://github.com/apache/incubator-mxnet/blob/15ea40d9bf7da3c4618ca45a6d023d9b0fb1c295/include/mxnet/tensor_blob.h#L387
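
   (Editor's note: another header-only way to fail loudly without glog, shown purely as a sketch and not what the PR adopted, is to throw from the default case:)
   ```
   // Editor's sketch (not what the PR adopted): hard failure without glog,
   // using only the standard library (<stdexcept> and <string>).
   default:
     throw std::runtime_error("Error! Invalid dtype flag: " +
                              std::to_string(dtype));
   ```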


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-17 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r336303413
 
 

 ##
 File path: include/mxnet/lib_api.h
 ##
 @@ -38,6 +38,154 @@
 
 #define MX_LIBRARY_VERSION 1
 
+/*
+ * Import from DLPack 
https://github.com/dmlc/dlpack/blob/master/include/dlpack/dlpack.h
 
 Review comment:
   We want to keep everything in a single header file
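
   (Editor's note: for context, the DLPack subset being inlined is small. The following is a paraphrase of the relevant structs from the dlpack.h of that era; consult the linked upstream header for the authoritative definitions:)
   ```
   /* Paraphrased from dmlc/dlpack's dlpack.h (2019-era layout); see the
    * upstream header linked above for the authoritative definitions. */
   #include <stdint.h>

   typedef enum { kDLCPU = 1, kDLGPU = 2 } DLDeviceType;

   typedef struct {
     DLDeviceType device_type;  /* kDLCPU in this PR (CPU-only) */
     int device_id;
   } DLContext;

   typedef enum { kDLInt = 0U, kDLUInt = 1U, kDLFloat = 2U } DLDataTypeCode;

   typedef struct {
     uint8_t code;    /* kDLInt / kDLUInt / kDLFloat */
     uint8_t bits;    /* e.g. 32 for float32 */
     uint16_t lanes;  /* 1 for plain scalar types */
   } DLDataType;

   typedef struct {
     void* data;            /* data pointer */
     DLContext ctx;         /* device of the data */
     int ndim;              /* number of dimensions */
     DLDataType dtype;      /* element type */
     int64_t* shape;        /* shape array of length ndim */
     int64_t* strides;      /* NULL means compact row-major */
     uint64_t byte_offset;  /* byte offset into data */
   } DLTensor;
   ```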


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-12 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r334231752
 
 

 ##
 File path: example/extensions/lib_custom_op/gemm_lib.cc
 ##
 @@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file gemm_lib.cc
+ * \brief Sample 2D gemm custom operator implementation library file
+ */
+
+#include 
+#include "lib_api.h"
+
+// main matrix multiplication routine
+void gemm(const float* A, const float* B, float* C,
+  const unsigned n, const unsigned k, const unsigned m) {
+  unsigned i, j, kk;
+  for (i = 0; i < n; i++) {
+for (j = 0; j < m; j++) {
+  C[i*m+j] = 0;
+  for (kk = 0; kk < k; kk++) {
+C[i*m+j] += A[i*k+kk] * B[kk*m+j];
+  }
+}
+  }
+}
+
+void transpose(const float* A, float* At, const unsigned n, const unsigned m) {
+  unsigned i, j;
+  for (i = 0; i < n; i++) {
+for (j = 0; j < m; j++) {
+  At[i*m+j] = A[j*n+i];
+}
+  }
+}
+
+/*
+ * Executes C = A * B
+ * inputs[0] = A; inputs[1] = B; outputs[0] = C
+ */
+MXReturnValue forward(std::map<std::string, std::string> attrs,
+  std::vector<MXTensor> inputs,
+  std::vector<MXTensor> outputs,
+  OpResource res) {
+  // simple example of using runtime data type
+  if (inputs[0].dtype == kFloat32) {
+typedef float DType;
+// extract data pointers from tensors
+DType* A = inputs[0].data<DType>();
+DType* B = inputs[1].data<DType>();
+DType* C = outputs[0].data<DType>();
+// set tensor shapes
+unsigned n = inputs[0].shape[0];
+unsigned k = inputs[0].shape[1];
+unsigned m = inputs[1].shape[1];
+
+gemm(A, B, C, n, k, m);
+  }
+  return MX_SUCCESS;
+}
+
+/*
+ * Executes dA = dC * B.T; Executes dB = A.T * dC
+ * gradient inputs
+ * inputs[0] = dC
+ * original inputs
+ * inputs[1] = A; inputs[2] = B
+ * original outputs
+ * inputs[3] = C
+ * gradient outputs
+ * outputs[0] = dA; outputs[1] = dB
+ */
+MXReturnValue backward(std::map<std::string, std::string> attrs,
+   std::vector<MXTensor> inputs,
+   std::vector<MXTensor> outputs,
+   OpResource res) {
+  // extract data pointers from tensors
+  float* dC = inputs[0].data<float>();
+  float* A = inputs[1].data<float>();
+  float* B = inputs[2].data<float>();
+  float* dA = outputs[0].data<float>();
+  float* dB = outputs[1].data<float>();
+  // set tensor shapes
+  unsigned n = inputs[1].shape[0];
+  unsigned k = inputs[1].shape[1];
+  unsigned m = inputs[2].shape[1];
+
+  float *At = new float[k*n];
 
 Review comment:
   @wkcn remember in this PR there is only CPU operator support. The next PR 
will have GPU support and we’ll make sure to distinguish cpu/gpu allocations. 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-11 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r334198386
 
 

 ##
 File path: python/mxnet/__init__.py
 ##
 @@ -87,6 +86,9 @@
 
 from . import gluon
 
+# Dynamic library module should be done after ndarray and symbol are 
initialized
+from . import library
 
 Review comment:
   @eric-haibin-lin We are not working on docs/tutorials in this PR. I've added 
an entry to the PR description in the "Future/Next-steps" section; we'll 
address it in the next PR.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-11 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r334197057
 
 

 ##
 File path: example/extensions/lib_custom_op/gemm_lib.cc
 ##
 @@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file gemm_lib.cc
+ * \brief Sample 2D gemm custom operator implementation library file
+ */
+
+#include 
+#include "lib_api.h"
+
+// main matrix multiplication routine
+void gemm(const float* A, const float* B, float* C,
+  const unsigned n, const unsigned k, const unsigned m) {
+  unsigned i, j, kk;
+  for (i = 0; i < n; i++) {
+for (j = 0; j < m; j++) {
+  C[i*m+j] = 0;
+  for (kk = 0; kk < k; kk++) {
+C[i*m+j] += A[i*k+kk] * B[kk*m+j];
+  }
+}
+  }
+}
+
+void transpose(const float* A, float* At, const unsigned n, const unsigned m) {
+  unsigned i, j;
+  for (i = 0; i < n; i++) {
+for (j = 0; j < m; j++) {
+  At[i*m+j] = A[j*n+i];
+}
+  }
+}
+
+/*
+ * Executes C = A * B
+ * inputs[0] = A; inputs[1] = B; outputs[0] = C
+ */
+MXReturnValue forward(std::map<std::string, std::string> attrs,
+  std::vector<MXTensor> inputs,
+  std::vector<MXTensor> outputs,
+  OpResource res) {
+  // simple example of using runtime data type
+  if (inputs[0].dtype == kFloat32) {
+typedef float DType;
+// extract data pointers from tensors
+DType* A = inputs[0].data<DType>();
+DType* B = inputs[1].data<DType>();
+DType* C = outputs[0].data<DType>();
+// set tensor shapes
+unsigned n = inputs[0].shape[0];
+unsigned k = inputs[0].shape[1];
+unsigned m = inputs[1].shape[1];
+
+gemm(A, B, C, n, k, m);
+  }
+  return MX_SUCCESS;
+}
+
+/*
+ * Executes dA = dC * B.T; Executes dB = A.T * dC
+ * gradient inputs
+ * inputs[0] = dC
+ * original inputs
+ * inputs[1] = A; inputs[2] = B
+ * original outputs
+ * inputs[3] = C
+ * gradient outputs
+ * outputs[0] = dA; outputs[1] = dB
+ */
+MXReturnValue backward(std::map<std::string, std::string> attrs,
+   std::vector<MXTensor> inputs,
+   std::vector<MXTensor> outputs,
+   OpResource res) {
+  // extract data pointers from tensors
+  float* dC = inputs[0].data<float>();
+  float* A = inputs[1].data<float>();
+  float* B = inputs[2].data<float>();
+  float* dA = outputs[0].data<float>();
+  float* dB = outputs[1].data<float>();
+  // set tensor shapes
+  unsigned n = inputs[1].shape[0];
+  unsigned k = inputs[1].shape[1];
+  unsigned m = inputs[2].shape[1];
+
+  float *At = new float[k*n];
 
 Review comment:
   @rondogency can we use OpResource to allocate the memory instead of using 
"new" like @eric-haibin-lin is suggesting?
   We need to make sure we only make one call to allocate memory, so make sure 
to request the amount for both At and Bt and then split up the memory manually. 
For example:
   ```
   float *mem = static_cast<float*>(res.alloc((k*n + m*k)*sizeof(float)));
   float *At = mem;
   float *Bt = mem + (k*n);
   ```
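
   (Editor's sketch of how backward() could consume that single allocation, reusing the transpose/gemm helpers quoted above; hypothetical code, not what the PR merged:)
   ```
   // Hypothetical continuation of the suggestion above: one alloc() call,
   // split into the two scratch buffers backward() needs.
   float *mem = static_cast<float*>(res.alloc((k*n + m*k) * sizeof(float)));
   float *At = mem;          // k x n scratch for A^T
   float *Bt = mem + (k*n);  // m x k scratch for B^T
   transpose(A, At, k, n);   // At = A^T
   transpose(B, Bt, m, k);   // Bt = B^T
   gemm(dC, Bt, dA, n, m, k);  // dA = dC * B^T : (n x m) * (m x k)
   gemm(At, dC, dB, k, n, m);  // dB = A^T * dC : (k x n) * (n x m)
   ```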


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-09 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r333269323
 
 

 ##
 File path: src/c_api/c_api.cc
 ##
 @@ -547,37 +552,39 @@ int MXLoadLib(const char *path) {
 };
 
 // stateful forward and backward
-auto fstateful_lambda = [=](bool forward,
+auto fstateful_lambda = [=](bool is_forward,
 const OpStatePtr& state_ptr,
 const OpContext& ctx,
 const std::vector& inputs,
 const std::vector& req,
 const std::vector& outputs) {
-  // create a vector of tensors for inputs
-  std::vector<MXTensor> c_inputs(inputs.size());
+  std::vector<void*> in_data, out_data;
+  std::vector<const int64_t*> in_shapes, out_shapes;
+  std::vector<int> in_dims, out_dims;
+  std::vector<int> in_types, out_types;
+
+  // convert input tensors to constituent parts
   for (size_t i = 0; i < inputs.size(); i++) {
-c_inputs[i].data_ptr = inputs[i].data().dptr_;
-c_inputs[i].dtype = (MXDType)inputs[i].dtype();
-for (int_least16_t j = 0; j < inputs[i].shape().ndim(); j++) {
-  c_inputs[i].shape.push_back(inputs[i].shape().data()[j]);
-}
+in_data.push_back(inputs[i].data().dptr_);
+in_shapes.push_back(inputs[i].shape().data());
+in_dims.push_back(inputs[i].shape().ndim());
+in_types.push_back(inputs[i].dtype());
   }
 
-  // create a vector of tensors for outputs
-  std::vector<MXTensor> c_outputs(outputs.size());
+  // convert output tensors to constituent parts
   for (size_t i = 0; i < outputs.size(); i++) {
-c_outputs[i].data_ptr = outputs[i].data().dptr_;
-c_outputs[i].dtype = (MXDType)outputs[i].dtype();
-for (int j = 0; j < outputs[i].shape().ndim(); j++) {
-  c_outputs[i].shape.push_back(outputs[i].shape().data()[j]);
-}
+out_data.push_back(outputs[i].data().dptr_);
+out_shapes.push_back(outputs[i].shape().data());
+out_dims.push_back(outputs[i].shape().ndim());
+out_types.push_back(outputs[i].dtype());
   }
 
   // get memory resource
  const Resource &resource = ctx.requested[0];
  mshadow::Stream<mxnet::cpu> *cpu_stream = ctx.get_stream<mxnet::cpu>();
 
   // create lambda that captures stream & resource objects
+  // the memory pointer returned will eventually return to user
 
 Review comment:
   nit: consider rephrasing


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-09 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r333268984
 
 

 ##
 File path: src/c_api/c_api.cc
 ##
 @@ -406,6 +409,7 @@ int MXLoadLib(const char *path) {
  mshadow::Stream<mxnet::cpu> *cpu_stream = ctx.get_stream<mxnet::cpu>();
 
   // create lambda that captures stream & resource objects
+  // the memory pointer returned will eventually return to user
 
 Review comment:
   nit: consider rephrasing


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-07 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r332278862
 
 

 ##
 File path: include/mxnet/lib_api.h
 ##
 @@ -18,33 +18,754 @@
  */
 
 /*!
- * Copyright (c) 2015 by Contributors
+ * Copyright (c) 2019 by Contributors
  * \file lib_api.h
  * \brief APIs to interact with libraries
+ * This API specifies function prototypes to
+ * register custom ops for library authors
  */
+
 #ifndef MXNET_LIB_API_H_
 #define MXNET_LIB_API_H_
 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define MX_LIBRARY_VERSION 1
+
+/*!
+ * \brief Tensor data type, consistent with mshadow data type
+ */
+enum MXDType {
+  kFloat32 = 0,
+  kFloat64 = 1,
+  kFloat16 = 2,
+  kUint8 = 3,
+  kInt32 = 4,
+  kInt8  = 5,
+  kInt64 = 6,
+};
+
+enum MXReturnValue {
+  MX_FAIL = 0,
+  MX_SUCCESS = 1,
+};
+
+/*!
+ * \brief Tensor data structure used by custom operator
+ */
+struct MXTensor {
+  MXTensor() : data_ptr(NULL) {}
+
+  MXTensor(void *data_ptr, const std::vector<int64_t> &shape, MXDType dtype)
+  : data_ptr(data_ptr), shape(shape), dtype(dtype) {}
+
+  /*! \brief helper function to cast data pointer */
+  template<typename data_type>
+  inline data_type* data() {
+return reinterpret_cast(data_ptr);
+  }
+
+  /*! \brief helper function to get data size */
+  inline int64_t size() {
+int64_t size = 1;
+for (unsigned int i = 0; i < shape.size(); i++) {
+  size *= shape[i];
+}
+return size;
+  }
+
+  // data is flatten 1D repr of tensor, elements are in continuous memory
+  // user can access each element using the shape of tensor
+  // it may also point to data allocated on gpu
+  void *data_ptr;
+
+  // shape is in [2,3,4] format to represent high-dim tensor
+  std::vector<int64_t> shape;
+
+  // type can only be MXDType enum types
+  MXDType dtype;
+
+  // gpu flag to specify the data tensor storage location
+  bool is_gpu;
+};
+
+/*!
+ * \brief resource malloc function to allocate memory inside Forward/Backward 
functions
+ */
+typedef void* (*xpu_malloc_t)(void*, int);
+
+/*!
+ * \brief provide resource APIs memory allocation mechanism to 
Forward/Backward functions
+ */
+class OpResource {
+ public:
+  OpResource(xpu_malloc_t xm, void* _xm) : xpu_malloc(xm), _xpu_malloc(_xm) {}
+
+  /*! \brief allocate memory controlled by MXNet */
+  void* alloc(int size) {
+return xpu_malloc(_xpu_malloc, size);
+  }
+
+ private:
+  xpu_malloc_t xpu_malloc;
+  void* _xpu_malloc;
+};
+
+/*!
+ * \brief Json utility to parse serialized subgraph symbol
+ */
+/*! \brief Macro to help passing serialized subgraph through attribute dict */
+#define SUBGRAPH_SYM_JSON "subgraph_sym_json"
+
+/*! \brief Types of JSON objects */
+enum json_type {ERR, STR, NUM, LIST, MAP};
+
+/*! \brief definition of JSON objects */
+struct json_val {
+  json_val() : type(ERR), num(-1), str("") {}  // default constructor
+  // construct a JSON object by type
+  explicit json_val(json_type t) : type(t), num(-1), str("") {}
+  // construct a string JSON object
+  explicit json_val(std::string s) : type(STR), num(-1), str(s) {}
+  // construct a number JSON object
+  explicit json_val(int n) : type(NUM), num(n), str(std::to_string(n)) {}
+  // complex constructor
+  json_val(json_type t, int n, std::string s) : type(t), num(n), str(s) {}
+  bool operator<(const json_val &o) const {
+// for string JSON objects compare the string
+if (type == STR) return type == o.type && str < o.str;
+// for number JSON objects compare the number
+if (type == NUM) return type == o.type && num < o.num;
+// for list JSON objects, compare the size of list, and then each object 
in the list
+if (type == LIST) {
+  if (list.size() != o.list.size()) return false;
+  for (unsigned int i=0; i< list.size(); i++)
+if (list[i] < o.list[i])
+  return false;  // if we find an object that doesnt match return
+  return true;  // all objects in lists matched
+}
+// for map JSON objects, compare the size of map, and then each key/value 
in the maps
+if (type == MAP) {
+  if (map.size() != o.map.size()) return false;
+  for (auto &item : map) {
+// if one map is missing a key in another return
+if (o.map.find(item.first) == o.map.end()) return false;
+if (item.second < o.map.at(item.first)) return false;
+  }
+  return true;
+}
+return type < o.type;
+  }
+  json_type type;
+  int num;
+  std::string str;
+  std::vector<json_val> list;
+  std::map<json_val, json_val> map;
+};
+
+/*! \brief functions used for parsing JSON */
+struct Json_Parser {
+  json_val parse_to_json(std::string json) {
+unsigned int idx = 0;
+return parse(json, &idx);
+  }
+  void print_json_val(json_val val) {
+std::cout << json_val_string(val) << std::endl;
+  }
+  // debug function to convert a JSON object to a string
+  std::string json_val_string(const json_val ) {
+std::string ret;
+switch (val.type) {
+   

[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-07 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r332277532
 
 

 ##
 File path: include/mxnet/lib_api.h
 ##
 @@ -18,33 +18,754 @@
  */
 
 /*!
- * Copyright (c) 2015 by Contributors
+ * Copyright (c) 2019 by Contributors
  * \file lib_api.h
  * \brief APIs to interact with libraries
+ * This API specifies function prototypes to
+ * register custom ops for library authors
  */
+
 #ifndef MXNET_LIB_API_H_
 #define MXNET_LIB_API_H_
 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define MX_LIBRARY_VERSION 1
+
+/*!
+ * \brief Tensor data type, consistent with mshadow data type
+ */
+enum MXDType {
+  kFloat32 = 0,
+  kFloat64 = 1,
+  kFloat16 = 2,
+  kUint8 = 3,
+  kInt32 = 4,
+  kInt8  = 5,
+  kInt64 = 6,
+};
+
+enum MXReturnValue {
+  MX_FAIL = 0,
+  MX_SUCCESS = 1,
+};
+
+/*!
+ * \brief Tensor data structure used by custom operator
+ */
+struct MXTensor {
+  MXTensor() : data_ptr(NULL) {}
+
+  MXTensor(void *data_ptr, const std::vector<int64_t> &shape, MXDType dtype)
+  : data_ptr(data_ptr), shape(shape), dtype(dtype) {}
+
+  /*! \brief helper function to cast data pointer */
+  template<typename data_type>
+  inline data_type* data() {
+return reinterpret_cast(data_ptr);
+  }
+
+  /*! \brief helper function to get data size */
+  inline int64_t size() {
+int64_t size = 1;
+for (unsigned int i = 0; i < shape.size(); i++) {
+  size *= shape[i];
+}
+return size;
+  }
+
+  // data is flatten 1D repr of tensor, elements are in continuous memory
+  // user can access each element using the shape of tensor
+  // it may also point to data allocated on gpu
+  void *data_ptr;
+
+  // shape is in [2,3,4] format to represent high-dim tensor
+  std::vector<int64_t> shape;
+
+  // type can only be MXDType enum types
+  MXDType dtype;
+
+  // gpu flag to specify the data tensor storage location
+  bool is_gpu;
+};
+
+/*!
+ * \brief resource malloc function to allocate memory inside Forward/Backward 
functions
+ */
+typedef void* (*xpu_malloc_t)(void*, int);
+
+/*!
+ * \brief provide resource APIs memory allocation mechanism to 
Forward/Backward functions
+ */
+class OpResource {
+ public:
+  OpResource(xpu_malloc_t xm, void* _xm) : xpu_malloc(xm), _xpu_malloc(_xm) {}
+
+  /*! \brief allocate memory controlled by MXNet */
+  void* alloc(int size) {
+return xpu_malloc(_xpu_malloc, size);
+  }
+
+ private:
+  xpu_malloc_t xpu_malloc;
+  void* _xpu_malloc;
+};
+
+/*!
+ * \brief Json utility to parse serialized subgraph symbol
+ */
+/*! \brief Macro to help passing serialized subgraph through attribute dict */
+#define SUBGRAPH_SYM_JSON "subgraph_sym_json"
+
+/*! \brief Types of JSON objects */
+enum json_type {ERR, STR, NUM, LIST, MAP};
+
+/*! \brief definition of JSON objects */
+struct json_val {
+  json_val() : type(ERR), num(-1), str("") {}  // default constructor
+  // construct a JSON object by type
+  explicit json_val(json_type t) : type(t), num(-1), str("") {}
+  // construct a string JSON object
+  explicit json_val(std::string s) : type(STR), num(-1), str(s) {}
+  // construct a number JSON object
+  explicit json_val(int n) : type(NUM), num(n), str(std::to_string(n)) {}
+  // complex constructor
+  json_val(json_type t, int n, std::string s) : type(t), num(n), str(s) {}
+  bool operator<(const json_val &o) const {
+// for string JSON objects compare the string
+if (type == STR) return type == o.type && str < o.str;
+// for number JSON objects compare the number
+if (type == NUM) return type == o.type && num < o.num;
+// for list JSON objects, compare the size of list, and then each object 
in the list
+if (type == LIST) {
+  if (list.size() != o.list.size()) return false;
+  for (unsigned int i=0; i< list.size(); i++)
+if (list[i] < o.list[i])
+  return false;  // if we find an object that doesnt match return
+  return true;  // all objects in lists matched
+}
+// for map JSON objects, compare the size of map, and then each key/value 
in the maps
+if (type == MAP) {
+  if (map.size() != o.map.size()) return false;
+  for (auto &item : map) {
+// if one map is missing a key in another return
+if (o.map.find(item.first) == o.map.end()) return false;
+if (item.second < o.map.at(item.first)) return false;
+  }
+  return true;
+}
+return type < o.type;
+  }
+  json_type type;
+  int num;
+  std::string str;
+  std::vector<json_val> list;
+  std::map<json_val, json_val> map;
+};
+
+/*! \brief functions used for parsing JSON */
+struct Json_Parser {
 
 Review comment:
   change to JsonParser


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:

[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-07 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r332277402
 
 

 ##
 File path: include/mxnet/lib_api.h
 ##
 @@ -18,33 +18,754 @@
  */
 
 /*!
- * Copyright (c) 2015 by Contributors
+ * Copyright (c) 2019 by Contributors
  * \file lib_api.h
  * \brief APIs to interact with libraries
+ * This API specifies function prototypes to
+ * register custom ops for library authors
  */
+
 #ifndef MXNET_LIB_API_H_
 #define MXNET_LIB_API_H_
 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define MX_LIBRARY_VERSION 1
+
+/*!
+ * \brief Tensor data type, consistent with mshadow data type
+ */
+enum MXDType {
+  kFloat32 = 0,
+  kFloat64 = 1,
+  kFloat16 = 2,
+  kUint8 = 3,
+  kInt32 = 4,
+  kInt8  = 5,
+  kInt64 = 6,
+};
+
+enum MXReturnValue {
+  MX_FAIL = 0,
+  MX_SUCCESS = 1,
+};
+
+/*!
+ * \brief Tensor data structure used by custom operator
+ */
+struct MXTensor {
+  MXTensor() : data_ptr(NULL) {}
+
+  MXTensor(void *data_ptr, const std::vector<int64_t> &shape, MXDType dtype)
+  : data_ptr(data_ptr), shape(shape), dtype(dtype) {}
+
+  /*! \brief helper function to cast data pointer */
+  template<typename data_type>
+  inline data_type* data() {
+return reinterpret_cast(data_ptr);
+  }
+
+  /*! \brief helper function to get data size */
+  inline int64_t size() {
+int64_t size = 1;
+for (unsigned int i = 0; i < shape.size(); i++) {
+  size *= shape[i];
+}
+return size;
+  }
+
+  // data is flatten 1D repr of tensor, elements are in continuous memory
+  // user can access each element using the shape of tensor
+  // it may also point to data allocated on gpu
+  void *data_ptr;
+
+  // shape is in [2,3,4] format to represent high-dim tensor
+  std::vector<int64_t> shape;
+
+  // type can only be MXDType enum types
+  MXDType dtype;
+
+  // gpu flag to specify the data tensor storage location
+  bool is_gpu;
+};
+
+/*!
+ * \brief resource malloc function to allocate memory inside Forward/Backward 
functions
+ */
+typedef void* (*xpu_malloc_t)(void*, int);
+
+/*!
+ * \brief provide resource APIs memory allocation mechanism to 
Forward/Backward functions
+ */
+class OpResource {
+ public:
+  OpResource(xpu_malloc_t xm, void* _xm) : xpu_malloc(xm), _xpu_malloc(_xm) {}
+
+  /*! \brief allocate memory controlled by MXNet */
+  void* alloc(int size) {
+return xpu_malloc(_xpu_malloc, size);
+  }
+
+ private:
+  xpu_malloc_t xpu_malloc;
+  void* _xpu_malloc;
 
 Review comment:
   This is a pointer to the cpu_malloc lambda function that eventually just 
passes the value to:
   ```
   mshadow::Tensor<mxnet::cpu, 1, char> data =
   resource.get_space_typed<mxnet::cpu, 1, char>(mshadow::Shape1(size), 
cpu_stream);
   ```
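
   (Editor's note: the pattern being described is the classic C-callback trampoline. A minimal sketch with invented names; a capturing lambda cannot decay to a plain function pointer, so it crosses the boundary as a (function pointer, opaque state) pair, which is what xpu_malloc_t expresses:)
   ```
   // Editor's sketch (names invented): carrying a capturing allocator
   // lambda across a C-style boundary as (function pointer, state pointer).
   #include <functional>

   typedef void* (*xpu_malloc_t)(void* state, int size);

   static void* trampoline(void* state, int size) {
     // state is a std::function<void*(int)>* owned by the MXNet side
     auto* fn = static_cast<std::function<void*(int)>*>(state);
     return (*fn)(size);
   }
   // MXNet side constructs: OpResource res(trampoline, &cpu_malloc_fn);
   // Library side calls:    res.alloc(size) -> trampoline(&cpu_malloc_fn, size)
   ```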
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-07 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r332276986
 
 

 ##
 File path: include/mxnet/lib_api.h
 ##
 @@ -18,33 +18,754 @@
  */
 
 /*!
- * Copyright (c) 2015 by Contributors
+ * Copyright (c) 2019 by Contributors
  * \file lib_api.h
  * \brief APIs to interact with libraries
+ * This API specifies function prototypes to
+ * register custom ops for library authors
  */
+
 #ifndef MXNET_LIB_API_H_
 #define MXNET_LIB_API_H_
 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define MX_LIBRARY_VERSION 1
+
+/*!
+ * \brief Tensor data type, consistent with mshadow data type
+ */
+enum MXDType {
+  kFloat32 = 0,
+  kFloat64 = 1,
+  kFloat16 = 2,
+  kUint8 = 3,
+  kInt32 = 4,
+  kInt8  = 5,
+  kInt64 = 6,
+};
+
+enum MXReturnValue {
+  MX_FAIL = 0,
+  MX_SUCCESS = 1,
+};
+
+/*!
+ * \brief Tensor data structure used by custom operator
+ */
+struct MXTensor {
+  MXTensor() : data_ptr(NULL) {}
+
+  MXTensor(void *data_ptr, const std::vector<int64_t> &shape, MXDType dtype)
+  : data_ptr(data_ptr), shape(shape), dtype(dtype) {}
+
+  /*! \brief helper function to cast data pointer */
+  template<typename data_type>
+  inline data_type* data() {
+return reinterpret_cast(data_ptr);
+  }
+
+  /*! \brief helper function to get data size */
+  inline int64_t size() {
+int64_t size = 1;
+for (unsigned int i = 0; i < shape.size(); i++) {
+  size *= shape[i];
+}
+return size;
+  }
+
+  // data is flatten 1D repr of tensor, elements are in continuous memory
+  // user can access each element using the shape of tensor
+  // it may also point to data allocated on gpu
+  void *data_ptr;
+
+  // shape is in [2,3,4] format to represent high-dim tensor
+  std::vector<int64_t> shape;
+
+  // type can only be MXDType enum types
+  MXDType dtype;
+
+  // gpu flag to specify the data tensor storage location
+  bool is_gpu;
+};
+
+/*!
+ * \brief resource malloc function to allocate memory inside Forward/Backward 
functions
+ */
+typedef void* (*xpu_malloc_t)(void*, int);
 
 Review comment:
   This just passes the value to:
   ```
   mshadow::Tensor<mxnet::cpu, 1, char> data =
   resource.get_space_typed<mxnet::cpu, 1, char>(mshadow::Shape1(size), 
cpu_stream);
   ```
   
   So it doesn't need to be aligned, but the user can choose to align as they 
want. Again, currently this is only for allocating CPU memory via the MXNet 
Resource manager.
   
   We only support 32-bit tensor dimensions in this PR; we'll look to support 
large tensor operators in the next PR (after large tensor support is formally 
added in their forthcoming PRs).
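
   (Editor's sketch: a library author needing aligned scratch from this unaligned alloc() can over-allocate and round up with std::align from C++11; illustrative only:)
   ```
   // Editor's sketch (illustrative): over-allocate, then round up.
   #include <memory>   // std::align
   #include <cstddef>

   void* aligned_scratch(OpResource& res, std::size_t alignment, std::size_t size) {
     std::size_t space = size + alignment;  // slack for rounding up
     void* raw = res.alloc(static_cast<int>(space));
     // std::align advances raw to the next alignment boundary, or returns
     // NULL if space can no longer hold size bytes.
     return std::align(alignment, size, raw, space);
   }
   ```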


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-07 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r332275641
 
 

 ##
 File path: include/mxnet/lib_api.h
 ##
 @@ -18,33 +18,754 @@
  */
 
 /*!
- * Copyright (c) 2015 by Contributors
+ * Copyright (c) 2019 by Contributors
  * \file lib_api.h
  * \brief APIs to interact with libraries
+ * This API specifies function prototypes to
+ * register custom ops for library authors
  */
+
 #ifndef MXNET_LIB_API_H_
 #define MXNET_LIB_API_H_
 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define MX_LIBRARY_VERSION 1
+
+/*!
+ * \brief Tensor data type, consistent with mshadow data type
+ */
+enum MXDType {
+  kFloat32 = 0,
+  kFloat64 = 1,
+  kFloat16 = 2,
+  kUint8 = 3,
+  kInt32 = 4,
+  kInt8  = 5,
+  kInt64 = 6,
+};
+
+enum MXReturnValue {
+  MX_FAIL = 0,
+  MX_SUCCESS = 1,
+};
+
+/*!
+ * \brief Tensor data structure used by custom operator
+ */
+struct MXTensor {
+  MXTensor() : data_ptr(NULL) {}
+
+  MXTensor(void *data_ptr, const std::vector<int64_t> &shape, MXDType dtype)
+  : data_ptr(data_ptr), shape(shape), dtype(dtype) {}
+
+  /*! \brief helper function to cast data pointer */
+  template<typename data_type>
+  inline data_type* data() {
+return reinterpret_cast(data_ptr);
+  }
+
+  /*! \brief helper function to get data size */
+  inline int64_t size() {
+int64_t size = 1;
+for (unsigned int i = 0; i < shape.size(); i++) {
+  size *= shape[i];
+}
+return size;
+  }
+
+  // data is flatten 1D repr of tensor, elements are in continuous memory
+  // user can access each element using the shape of tensor
+  // it may also point to data allocated on gpu
+  void *data_ptr;
+
+  // shape is in [2,3,4] format to represent high-dim tensor
+  std::vector<int64_t> shape;
+
+  // type can only be MXDType enum types
+  MXDType dtype;
+
+  // gpu flag to specify the data tensor storage location
+  bool is_gpu;
 
 Review comment:
   Another good catch; this is not currently relevant since we don't support GPU 
operators yet (we only register FCompute for now). This should be addressed in 
the follow-up PR, where we should also take a device_id or something similar to 
identify which device the array was allocated on. 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-07 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r332275249
 
 

 ##
 File path: include/mxnet/lib_api.h
 ##
 @@ -18,33 +18,754 @@
  */
 
 /*!
- * Copyright (c) 2015 by Contributors
+ * Copyright (c) 2019 by Contributors
  * \file lib_api.h
  * \brief APIs to interact with libraries
+ * This API specifies function prototypes to
+ * register custom ops for library authors
  */
+
 #ifndef MXNET_LIB_API_H_
 #define MXNET_LIB_API_H_
 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define MX_LIBRARY_VERSION 1
+
+/*!
+ * \brief Tensor data type, consistent with mshadow data type
+ */
+enum MXDType {
+  kFloat32 = 0,
+  kFloat64 = 1,
+  kFloat16 = 2,
+  kUint8 = 3,
+  kInt32 = 4,
+  kInt8  = 5,
+  kInt64 = 6,
+};
+
+enum MXReturnValue {
+  MX_FAIL = 0,
+  MX_SUCCESS = 1,
+};
+
+/*!
+ * \brief Tensor data structure used by custom operator
+ */
+struct MXTensor {
+  MXTensor() : data_ptr(NULL) {}
+
+  MXTensor(void *data_ptr, const std::vector<int64_t> &shape, MXDType dtype)
+  : data_ptr(data_ptr), shape(shape), dtype(dtype) {}
+
+  /*! \brief helper function to cast data pointer */
+  template<typename data_type>
+  inline data_type* data() {
+return reinterpret_cast(data_ptr);
+  }
+
+  /*! \brief helper function to get data size */
+  inline int64_t size() {
+int64_t size = 1;
+for (unsigned int i = 0; i < shape.size(); i++) {
+  size *= shape[i];
+}
+return size;
+  }
+
+  // data is flatten 1D repr of tensor, elements are in continuous memory
+  // user can access each element using the shape of tensor
+  // it may also point to data allocated on gpu
+  void *data_ptr;
+
+  // shape is in [2,3,4] format to represent high-dim tensor
+  std::vector<int64_t> shape;
 
 Review comment:
   Good catch @junrushao1994; we don't assume MXNet and the custom operator 
library use the same C++ runtime.
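
   (Editor's note: elsewhere in this thread the C boundary already avoids this issue by decomposing each tensor into plain C types, as the fstateful_lambda excerpt above shows; a sketch of that convention, with a hypothetical signature:)
   ```
   // Editor's sketch of the C-friendly convention at the library boundary
   // (mirrors the in_data/in_shapes/in_dims/in_types vectors in c_api.cc):
   // only raw pointers and ints cross the ABI, never std::vector.
   void fcomp_boundary(void* const* in_data,             // per-tensor data pointers
                       const int64_t* const* in_shapes,  // per-tensor shape arrays
                       const int* in_dims,               // per-tensor ranks
                       const int* in_types,              // per-tensor MXDType codes
                       int num_in);                      // tensor count
   ```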


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #15921: dynamic custom operator support

2019-10-07 Thread GitBox
samskalicky commented on a change in pull request #15921: dynamic custom 
operator support
URL: https://github.com/apache/incubator-mxnet/pull/15921#discussion_r332275130
 
 

 ##
 File path: src/c_api/c_api.cc
 ##
 @@ -92,16 +95,589 @@ inline int MXAPIGetFunctionRegInfo(const FunRegType *e,
 
 // NOTE: return value is added in API_END
 
-// Loads library and initializes it
+/*!
+ * \brief Loads dynamic library and initializes it
+ * \param path library path
+ */
 int MXLoadLib(const char *path) {
   API_BEGIN();
   void *lib = LibraryInitializer::Get()->lib_load(path);
   if (!lib)
 LOG(FATAL) << "Unable to load library";
 
+  // check that library and MXNet use same version of library API
+  opVersion_t opVersion = get_func<opVersion_t>(lib, 
const_cast<char*>(MXLIB_OPVERSION_STR));
+  int libVersion = opVersion();
+  if (MX_LIBRARY_VERSION != libVersion)
+LOG(FATAL) << "Library version (" << libVersion << ") does not match MXNet 
version ("
+   << MX_LIBRARY_VERSION << ")";
+
+  // initialize library by passing MXNet version
   initialize_t initialize = get_func<initialize_t>(lib, 
const_cast<char*>(MXLIB_INITIALIZE_STR));
   if (!initialize(static_cast(MXNET_VERSION)))
 LOG(FATAL) << "Library failed to initialize";
+
+  // get C type interface functions
+  opCallFree_t callFree = get_func<opCallFree_t>(lib, 
const_cast<char*>(MXLIB_OPCALLFREE_STR));
+
+  opCallParseAttrs_t callParseAttrs =
+get_func<opCallParseAttrs_t>(lib, 
const_cast<char*>(MXLIB_OPCALLPARSEATTRS_STR));
+
+  opCallInferShape_t callInferShape =
+get_func<opCallInferShape_t>(lib, 
const_cast<char*>(MXLIB_OPCALLINFERSHAPE_STR));
+
+  opCallInferType_t callInferType =
+get_func<opCallInferType_t>(lib, 
const_cast<char*>(MXLIB_OPCALLINFERTYPE_STR));
+
+  opCallFComp_t callFComp =
+get_func<opCallFComp_t>(lib, const_cast<char*>(MXLIB_OPCALLFCOMP_STR));
+
+  opCallMutateInputs_t callMutateInputs =
+get_func<opCallMutateInputs_t>(lib, 
const_cast<char*>(MXLIB_OPCALLMUTATEINPUTS_STR));
+
+  opCallCreateOpState_t callCreateOpState =
+get_func<opCallCreateOpState_t>(lib, 
const_cast<char*>(MXLIB_OPCALLCREATEOPSTATE_STR));
+
+  // get number of operators registered in the library
+  opRegSize_t opRegSize = get_func<opRegSize_t>(lib, 
const_cast<char*>(MXLIB_OPREGSIZE_STR));
+  int numOps = opRegSize();
+  LOG(INFO) << "Found " << numOps << " operators in library";
+
+  /*
+   * The library has custom operators implementation
+   * loop and register each operator in the library to NNVM
+   */
+  opRegGet_t opRegGet = get_func<opRegGet_t>(lib, 
const_cast<char*>(MXLIB_OPREGGET_STR));
+  for (int i = 0; i < numOps; i++) {
+const char* name;
+// function pointers holding implementation from custom library
+fcomp_t fcomp_fp = nullptr;
+parseAttrs_t parse_fp = nullptr;
+inferType_t type_fp = nullptr;
+inferShape_t shape_fp = nullptr;
+// optional attributes
+fcomp_t fgrad_fp = nullptr;
+mutateInputs_t mutate_fp = nullptr;
+createOpState_t create_opstate_fp = nullptr;
+
+// get custom operator implementation from the dynamic library
+opRegGet(i, &name, &fcomp_fp, &parse_fp, &type_fp, &shape_fp, &fgrad_fp,
+&mutate_fp, &create_opstate_fp);
+
+// validate custom operator functions from the dynamic library
+CHECK(fcomp_fp != nullptr || create_opstate_fp != nullptr) << "Error 
loading '" << name
+<< "' custom op, Forward or CreateOpState function 
was not set.";
+CHECK(parse_fp != nullptr) << "Error loading '" << name
+<< "' custom op, ParseAttrs function was not set.";
+CHECK(type_fp  != nullptr) << "Error loading '" << name
+<< "' custom op, InferType function was not set.";
+CHECK(shape_fp != nullptr) << "Error loading '" << name
+<< "' custom op, InferShape function was not set.";
+
+LOG(INFO) << "\tOp[" << i << "] " << name;
+std::string name_str(name);
+
+/*
+ * Below are a series of lambda functions that will be registered in the 
NNVM op registration
+ * Each one has the standard MXNet signature and converts to types 
supported by externally
+ * registered operators. 
+ */
+
+// lambda function to call parse attributes
+auto attr_parser = [=](const NodeAttrs* attrs) {
+  // convert attributes to vector of char
+  std::vector<const char*> attr_keys, attr_vals;
+  for (auto kv : attrs->dict) {
+attr_keys.push_back(kv.first.c_str());
+attr_vals.push_back(kv.second.c_str());
+  }
+  // convert subgraph symbol from node attributes to char*
+  std::string subgraph_json;
+  if (!attrs->subgraphs.empty()) {
+nnvm::Graph g;
+g.outputs = attrs->subgraphs[0].get()->outputs;
+subgraph_json = nnvm::pass::SaveJSON(g);
+attr_keys.push_back(SUBGRAPH_SYM_JSON);
+attr_vals.push_back(subgraph_json.c_str());
+  }
+
+  int num_in = -1;
+  int num_out = -1;
+  CHECK(callParseAttrs(parse_fp, attr_keys.data(), attr_vals.data(), 
attr_keys.size(),
+   &num_in, &num_out))
+  << "Error calling ParseAttrs for custom operator '" << name_str << "'";
+
+  // return type void
+};