zhiics commented on a change in pull request #6978:
URL: https://github.com/apache/tvm/pull/6978#discussion_r532856178



##########
File path: python/tvm/topi/cuda/sort.py
##########
@@ -593,3 +694,82 @@ def schedule_topk(outs):
       The computation schedule for the op.
     """
     return _schedule_sort(outs)
+
+
+def _dyn_topk_legalize(attrs, inputs, arg_types):
+    """Legalizes dyn.topk op.
+
+    On GPU, we don't directly implement a topi kernel. Instead, we combine
+    topi sort and topi strided slice to implement topk. This topi-level composition
+    doesn't work with the dynamic op. To support the dynamic op on GPU, we instead
+    legalize it into the same logic (sort + strided slice) in Relay to take advantage
+    of the VM's dynamic shape capabilities.
+
+    Parameters
+    ----------
+    attrs : tvm.ir.Attrs
+        Attributes of the current topk op
+    inputs : list of tvm.relay.Expr
+        The args of the Relay expr to be legalized
+    arg_types : list of types
+        List of input and output types
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The legalized expr
+    """
+    data = tvm.relay.var(
+        "dyn_topk_data", shape=inputs[0].checked_type.shape, 
dtype=inputs[0].checked_type.dtype
+    )
+    k = tvm.relay.var(
+        "dyn_topk_k", shape=inputs[1].checked_type.shape, 
dtype=inputs[1].checked_type.dtype
+    )
+    sort_out = tvm.relay.sort(data, axis=attrs.axis, is_ascend=attrs.is_ascend)
+    argsort_out = tvm.relay.argsort(
+        data, axis=attrs.axis, is_ascend=attrs.is_ascend, dtype=attrs.dtype
+    )
+    dshape = tvm.relay.shape_of(data)
+
+    axis = tvm.relay.const([attrs.axis])
+
+    zero = tvm.relay.const([0])
+    one = tvm.relay.const([1])
+    rank = tvm.relay.shape_of(dshape)
+
+    normalized_axis = tvm.relay.where(axis < zero, axis + rank, axis)

Review comment:
      Would these ops impact the performance?
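
For context, here is a minimal runnable sketch (not part of the patch) of the
sort + strided_slice composition the docstring describes. The static shape (8,),
the hard-coded k = 3, and the "vm" executor are illustrative assumptions; in the
legalized graph, k and the axis arrive as runtime values instead.

    import numpy as np
    import tvm
    from tvm import relay

    # Top-3 of a 1-D tensor via sort + strided_slice, the same composition the
    # legalization emits. A descending sort puts the k largest values first.
    data = relay.var("data", shape=(8,), dtype="float32")
    sorted_vals = relay.sort(data, axis=-1, is_ascend=False)
    top3 = relay.strided_slice(sorted_vals, begin=[0], end=[3])

    mod = tvm.IRModule.from_expr(relay.Function([data], top3))
    x = np.random.rand(8).astype("float32")
    out = relay.create_executor("vm", mod=mod).evaluate()(x)
    print(out)  # the three largest values of x, in descending order

In the patch itself the axis can be negative and k is a runtime tensor, which is
why the legalized graph normalizes the axis with where(axis < 0, axis + rank, axis)
and slices with expression-valued bounds rather than the constants used above.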



