masahi commented on a change in pull request #7172: URL: https://github.com/apache/tvm/pull/7172#discussion_r549892107 ########## File path: python/tvm/topi/cuda/nms.py ########## @@ -512,26 +512,50 @@ def calculate_overlap(out_tensor, box_a_idx, box_b_idx): with ib.new_scope(): nthread_by = batch_size + nthread_tx = max_threads + by = te.thread_axis("blockIdx.y") + tx = te.thread_axis("threadIdx.x") ib.scope_attr(by, "thread_extent", nthread_by) + ib.scope_attr(tx, "thread_extent", nthread_tx) + i = by + base_idx = i * num_anchors * box_data_length num_valid_boxes_local = ib.allocate( "int32", (1,), name="num_valid_boxes_local", scope="local" ) num_valid_boxes_local[0] = 0 + nkeep = if_then_else(tvm.tir.all(top_k > 0, top_k < valid_count[i]), top_k, valid_count[i]) def nms_inner_loop(ib, j): + # The box j is valid, invalidate other boxes that overlap with j above iou_threshold + + # When return_indices is False, no need to populate box_indices + if return_indices: + orig_idx = sorted_index[i * num_anchors + j] + box_indices[i, num_valid_boxes_local[0]] = indices[i, orig_idx] + + # TODO(masahi): Want to do this instead of above, but the following is eliminated + # during codegen + # # Only one thread needs to this write + # with ib.if_scope(tx == 0): + # orig_idx = sorted_index[i * num_anchors + j] + # box_indices[i, num_valid_boxes_local[0]] = indices[i, orig_idx] + Review comment: Yes, I was hoping that removing pointless reads and writes of the same memory location by 1024 threads could make it a bit faster. At least it should be no worse. I am not sure whether there is much traffic if all threads perform the same memory read and write. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org
########## File path: python/tvm/topi/cuda/nms.py ########## @@ -512,26 +512,50 @@ def calculate_overlap(out_tensor, box_a_idx, box_b_idx): with ib.new_scope(): nthread_by = batch_size + nthread_tx = max_threads + by = te.thread_axis("blockIdx.y") + tx = te.thread_axis("threadIdx.x") ib.scope_attr(by, "thread_extent", nthread_by) + ib.scope_attr(tx, "thread_extent", nthread_tx) + i = by + base_idx = i * num_anchors * box_data_length num_valid_boxes_local = ib.allocate( "int32", (1,), name="num_valid_boxes_local", scope="local" ) num_valid_boxes_local[0] = 0 + nkeep = if_then_else(tvm.tir.all(top_k > 0, top_k < valid_count[i]), top_k, valid_count[i]) def nms_inner_loop(ib, j): + # The box j is valid, invalidate other boxes that overlap with j above iou_threshold + + # When return_indices is False, no need to populate box_indices + if return_indices: + orig_idx = sorted_index[i * num_anchors + j] + box_indices[i, num_valid_boxes_local[0]] = indices[i, orig_idx] + + # TODO(masahi): Want to do this instead of above, but the following is eliminated + # during codegen + # # Only one thread needs to this write + # with ib.if_scope(tx == 0): + # orig_idx = sorted_index[i * num_anchors + j] + # box_indices[i, num_valid_boxes_local[0]] = indices[i, orig_idx] + Review comment: Yes, I was hoping that removing pointless reads and writes of the same memory location by 1024 threads could make it a bit faster. At least it should be no worse. Not sure if there are many traffic if all threads do the same memory read and write. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org