ZhennanQin commented on a change in pull request #12530: Implement mkldnn convolution fusion and quantization.
URL: https://github.com/apache/incubator-mxnet/pull/12530#discussion_r219376896
 
 

 ##########
 File path: src/operator/nn/mkldnn/mkldnn_convolution-inl.h
 ##########
 @@ -35,19 +36,79 @@
 namespace mxnet {
 namespace op {
 
-mkldnn::convolution_forward::primitive_desc GetConvFwdImpl(
-    const ConvolutionParam& param, const bool is_train, const NDArray &data,
-    const NDArray &weights, const NDArray *bias, const NDArray &output);
+struct MKLDNNConvParam : public dmlc::Parameter<MKLDNNConvParam> {
+  // When adding more members into this class, please double check GetHash()
+  // won't overflow.
+  bool with_bn;
+  bool with_relu;
+  bool with_sum;
+  bool with_postsum_relu;
+  bool quantized;
+  bool weight_channelwise_scale;
+
+  dmlc::optional<float> min_calib_range;  // min float value calculated from calibration dataset
+  dmlc::optional<float> max_calib_range;  // max float value calculated from calibration dataset
+
+  DMLC_DECLARE_PARAMETER(MKLDNNConvParam) {
+    DMLC_DECLARE_FIELD(with_bn).set_default(false)
+    .describe("Add post batchnorm.");
+    DMLC_DECLARE_FIELD(with_relu).set_default(false)
+    .describe("Add post relu.");
+    DMLC_DECLARE_FIELD(with_sum).set_default(false)
+    .describe("Add post sum.");
+    DMLC_DECLARE_FIELD(with_postsum_relu).set_default(false)
+    .describe("Add post relu after sum.");
+    DMLC_DECLARE_FIELD(quantized).set_default(false)
+    .describe("Enable quantization.");
+    DMLC_DECLARE_FIELD(weight_channelwise_scale).set_default(true)
+    .describe("Quantize weight with channel-wise scales.");
+    DMLC_DECLARE_FIELD(min_calib_range)
+    .set_default(dmlc::optional<float>())
+    .describe("The minimum scalar value in the form of float32 obtained "
+              "through calibration. If present, it will be used to by "
+              "quantized convolution op to calculate primitive scale");
+    DMLC_DECLARE_FIELD(max_calib_range)
+    .set_default(dmlc::optional<float>())
+    .describe("The maximum scalar value in the form of float32 obtained "
+              "through calibration. If present, it will be used to by "
+              "quantized convolution op to calculate primitive scale");
+  }
+  int GetBoolHash() const {
+    int hash = 0;
+    // Pack each flag into one bit; the parentheses matter because ?:
+    // binds more loosely than +.
+    hash = hash * 2 + (this->with_bn ? 1 : 0);
+    hash = hash * 2 + (this->with_relu ? 1 : 0);
+    hash = hash * 2 + (this->with_sum ? 1 : 0);
+    hash = hash * 2 + (this->with_postsum_relu ? 1 : 0);
+    hash = hash * 2 + (this->quantized ? 1 : 0);
+    return hash;
 
 Review comment:
   Used to calculate the param hash when caching and reusing the MKL-DNN op primitive.
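
   To illustrate the caching pattern mentioned above, here is a minimal,
   self-contained C++ sketch of how a packed-bool hash could key a primitive
   cache. The Params, Primitive, and GetOrCreate names are illustrative
   assumptions, not the PR's actual types:

     // Minimal sketch (not the PR's code): each fusion flag packs into one
     // bit, so the five flags map to 32 distinct hash values, collision-free.
     #include <memory>
     #include <unordered_map>

     struct Primitive {};  // stand-in for an mkldnn convolution primitive

     struct Params {
       bool with_bn = false, with_relu = false, with_sum = false,
            with_postsum_relu = false, quantized = false;

       int BoolHash() const {
         int hash = 0;
         hash = hash * 2 + (with_bn ? 1 : 0);
         hash = hash * 2 + (with_relu ? 1 : 0);
         hash = hash * 2 + (with_sum ? 1 : 0);
         hash = hash * 2 + (with_postsum_relu ? 1 : 0);
         hash = hash * 2 + (quantized ? 1 : 0);
         return hash;
       }
     };

     // Reuse the primitive built for the same flag combination; a primitive
     // created for conv+bn+relu is never returned for plain conv.
     std::shared_ptr<Primitive> GetOrCreate(const Params& p) {
       static std::unordered_map<int, std::shared_ptr<Primitive>> cache;
       auto it = cache.find(p.BoolHash());
       if (it != cache.end()) return it->second;
       auto prim = std::make_shared<Primitive>();
       cache.emplace(p.BoolHash(), prim);
       return prim;
     }

   In the real operator the cache key would also need to cover input shapes,
   dtypes, and the calibration ranges, since those change the underlying
   primitive as well.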

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
