Write the fence after we reset the ring and use an IB
test to validate the reset.  This is safe since the
legacy enforce-isolation mode is enabled by default.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 855aeb7b1a89d..ee97ca472ac71 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -4457,6 +4457,7 @@ static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring 
*ring, u64 addr,
 {
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+       bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
 
        /* RELEASE_MEM - flush caches, send int */
        amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
@@ -4464,7 +4465,8 @@ static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring 
*ring, u64 addr,
                                 PACKET3_RELEASE_MEM_GCR_GL2_WB |
                                 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
                                 
PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
-                                PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
+                                PACKET3_RELEASE_MEM_EVENT_INDEX(5) |
+                                (exec ? PACKET3_RELEASE_MEM_EXECUTE_GFX11 : 
0)));
        amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 
1) |
                                 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
 
@@ -5344,7 +5346,13 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, 
unsigned int vmid)
                return r;
        }
 
-       return amdgpu_ring_test_ring(ring);
+       if (amdgpu_ring_alloc(ring, 8))
+               return -ENOMEM;
+       amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+                              ring->fence_drv.sync_seq, 0);
+       amdgpu_ring_commit(ring);
+
+       return gfx_v12_0_ring_test_ib(ring, AMDGPU_QUEUE_RESET_TIMEOUT);
 }
 
 static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
@@ -5457,7 +5465,13 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, 
unsigned int vmid)
                return r;
        }
 
-       return amdgpu_ring_test_ring(ring);
+       if (amdgpu_ring_alloc(ring, 8))
+               return -ENOMEM;
+       amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+                              ring->fence_drv.sync_seq, 0);
+       amdgpu_ring_commit(ring);
+
+       return gfx_v12_0_ring_test_ib(ring, AMDGPU_QUEUE_RESET_TIMEOUT);
 }
 
 static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring)
-- 
2.49.0

Reply via email to