rahul003 commented on a change in pull request #11356: [MXNET-560][WIP] Add temperature parameter in Softmax and SoftmaxOutput operator
URL: https://github.com/apache/incubator-mxnet/pull/11356#discussion_r197292947
########## File path: src/operator/nn/softmax-inl.h ##########

@@ -145,23 +155,36 @@ __global__ void softmax_compute_kernel(DType *in, DType *out, index_t M, int axi
   __syncthreads();
   red::sum::SetInitValue(smem[x]);
-  for (index_t i = x; i < M; i += x_size) {
-    red::sum::Reduce(smem[x], static_cast<DType>(expf(in[base + i*sa] - smax)));
+  if (temperature == 1.0) {
+    for (index_t i = x; i < M; i += x_size) {
+      red::sum::Reduce(smem[x], static_cast<DType>(expf(in[base + i*sa] - smax)));
+    }
+  } else {
+    for (index_t i = x; i < M; i += x_size) {
+      red::sum::Reduce(smem[x], static_cast<DType>(expf((in[base + i*sa] - smax)/temperature)));
+    }
   }
+
   __syncthreads();
   cuda::Reduce1D<red::sum, x_bits>(smem);
   __syncthreads();
   DType ssum = smem[0];
   __syncthreads();
-  for (index_t i = x; i < M; i += x_size) {
-    out[base + i*sa] = OP::Map(in[base + i*sa] - smax, ssum);
+  if (temperature == 1.0) {

Review comment:
   The division (even if it is not optimized away) should be cheaper than introducing a branch. On the GPU in particular, branch divergence can have a significant impact on performance, so it is better to avoid the branch and divide unconditionally.
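   For illustration, a minimal standalone sketch of the branch-free approach advocated above. This is not the MXNet `softmax_compute_kernel` itself: the kernel name `softmax_temp_kernel`, the `float`-only types, and the one-block-per-row launch layout are assumptions made to keep the example self-contained. The temperature is applied by an unconditional division, so every thread follows the same code path whether or not `temperature == 1.0` (in which case the division is numerically a no-op).

```cuda
#include <math.h>

// Sketch of a temperature softmax kernel with no branch on temperature.
// Launch as: softmax_temp_kernel<<<rows, threads, threads * sizeof(float)>>>
// with `threads` a power of two (required by the tree reductions below).
__global__ void softmax_temp_kernel(const float* in, float* out,
                                    int M, float temperature) {
  extern __shared__ float smem[];
  const float* row_in  = in  + blockIdx.x * M;
  float*       row_out = out + blockIdx.x * M;
  const int x = threadIdx.x;

  // 1. Row maximum, for numerical stability.
  float m = -INFINITY;
  for (int i = x; i < M; i += blockDim.x) m = fmaxf(m, row_in[i]);
  smem[x] = m;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (x < s) smem[x] = fmaxf(smem[x], smem[x + s]);
    __syncthreads();
  }
  const float smax = smem[0];
  __syncthreads();

  // 2. Sum of exp((v - max) / T); the division runs on every element,
  //    with no conditional around the loop.
  float sum = 0.0f;
  for (int i = x; i < M; i += blockDim.x)
    sum += expf((row_in[i] - smax) / temperature);
  smem[x] = sum;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (x < s) smem[x] += smem[x + s];
    __syncthreads();
  }
  const float ssum = smem[0];
  __syncthreads();

  // 3. Normalize.
  for (int i = x; i < M; i += blockDim.x)
    row_out[i] = expf((row_in[i] - smax) / temperature) / ssum;
}
```

   With a single code path, the two loop bodies in the patch collapse into one, which also avoids the duplicated loops that the `if (temperature == 1.0)` version carries.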