From: "Jiadong.Zhu" <jiadong....@amd.com>

Set the MIDCMD_PREEMPT_ENABLE bit in SDMA0_CNTL when the amdgpu_mcbp
module parameter is set, so mid-command-buffer preemption (MCBP) is
enabled on the SDMA engines.

Add an SDMA preempt_ib ring callback so the debugfs preemption test can
preempt the SDMA gfx ring.
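
For context, a minimal sketch (not part of this patch) of how the new
callback is expected to be reached. The amdgpu_ring_preempt_ib()
dispatch name and the error code follow the existing ring-funcs
convention and should be read as assumptions, not as the final debugfs
wiring:

    /*
     * Illustrative sketch only; assumes the usual amdgpu includes
     * (amdgpu.h / amdgpu_ring.h).  Preempt an SDMA gfx ring through
     * the ->preempt_ib callback added below.
     */
    static int example_preempt_sdma_gfx_ring(struct amdgpu_ring *ring)
    {
            if (!ring->funcs->preempt_ib)
                    return -EOPNOTSUPP;

            /* dispatches to sdma_v4_0_ring_preempt_ib() for SDMA rings */
            return amdgpu_ring_preempt_ib(ring);
    }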
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 53 ++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index d35f18536da2..bc69af4b4ada 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1502,6 +1502,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
                /* set utc l1 enable flag always to 1 */
                temp = RREG32_SDMA(i, mmSDMA0_CNTL);
                temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+               if (amdgpu_mcbp) {
+                       /* enable mid-command-buffer preemption (MCBP) */
+                       temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+               }
                WREG32_SDMA(i, mmSDMA0_CNTL, temp);
 
                if (!amdgpu_sriov_vf(adev)) {
@@ -2102,6 +2107,53 @@ static int sdma_v4_0_soft_reset(void *handle)
        return 0;
 }
 
+static int sdma_v4_0_ring_preempt_ib(struct amdgpu_ring *ring)
+{
+       int i, r = 0;
+       struct amdgpu_device *adev = ring->adev;
+       u32 index = 0;
+       u64 sdma_gfx_preempt;
+
+       amdgpu_sdma_get_index_from_ring(ring, &index);
+       if (index == 0)
+               sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
+       else
+               sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;
+
+       /* assert preemption condition */
+       amdgpu_ring_set_preempt_cond_exec(ring, false);
+
+       /* emit the trailing fence */
+       ring->trail_seq += 1;
+       amdgpu_ring_alloc(ring, 10);
+       sdma_v4_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
+                                 ring->trail_seq, 0);
+       amdgpu_ring_commit(ring);
+
+       /* assert IB preemption */
+       WREG32(sdma_gfx_preempt, 1);
+
+       /* poll the trailing fence */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (ring->trail_seq ==
+                   le32_to_cpu(*(ring->trail_fence_cpu_addr)))
+                       break;
+               udelay(1);
+       }
+
+       if (i >= adev->usec_timeout) {
+               r = -EINVAL;
+               DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
+       }
+
+       /* deassert IB preemption */
+       WREG32(sdma_gfx_preempt, 0);
+
+       /* deassert the preemption condition */
+       amdgpu_ring_set_preempt_cond_exec(ring, true);
+       return r;
+}
+
 static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
@@ -2435,6 +2487,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
        .emit_wreg = sdma_v4_0_ring_emit_wreg,
        .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+       .preempt_ib = sdma_v4_0_ring_preempt_ib,
 };
 
 /*
-- 
2.25.1
