Thanks to "drm/ttm: rework pipelined eviction fence handling", ttm
can deal correctly with moves and evictions being executed from
different contexts.
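
Use this to distribute buffer moves across all of the move entities
instead of always going through move_entities[0]: amdgpu_move_blit()
now picks the entity used for the copy (and for the optional VRAM wipe
on release) with a simple atomic round-robin, as in the hunks below:

    /* wraps over [0, num_move_entities) */
    e = atomic_inc_return(&adev->mman.next_move_entity) %
                          adev->mman.num_move_entities;
    entity = &adev->mman.move_entities[e];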

---
v4: use u32 for the index in the entities array
---

Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
Reviewed-by: Christian König <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 13 +++++++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  1 +
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 56b4f560ea7f..91fcf4f08181 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -387,9 +387,11 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+       struct amdgpu_ttm_buffer_entity *entity;
        struct amdgpu_copy_mem src, dst;
        struct dma_fence *fence = NULL;
        int r;
+       u32 e;
 
        src.bo = bo;
        dst.bo = bo;
@@ -398,8 +400,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        src.offset = 0;
        dst.offset = 0;
 
+       e = atomic_inc_return(&adev->mman.next_move_entity) %
+                             adev->mman.num_move_entities;
+       entity = &adev->mman.move_entities[e];
+
        r = amdgpu_ttm_copy_mem_to_mem(adev,
-                                      &adev->mman.move_entities[0],
+                                      entity,
                                       &src, &dst,
                                       new_mem->size,
                                       amdgpu_bo_encrypted(abo),
@@ -411,9 +417,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        if (old_mem->mem_type == TTM_PL_VRAM &&
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
-
-               r = amdgpu_fill_buffer(&adev->mman.move_entities[0],
-                                      abo, 0, NULL, &wipe_fence,
+               r = amdgpu_fill_buffer(entity, abo, 0, NULL, &wipe_fence,
                                       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
                if (r) {
                        goto error;
@@ -2403,6 +2407,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                }
 
                adev->mman.num_move_entities = num_move_entities;
+               atomic_set(&adev->mman.next_move_entity, 0);
                for (i = 0; i < num_move_entities; i++) {
                        r = amdgpu_ttm_buffer_entity_init(
                                &adev->mman.gtt_mgr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index cf32db3defb1..3b1973611446 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -76,6 +76,7 @@ struct amdgpu_mman {
        atomic_t next_clear_entity;
        u32 num_clear_entities;
        struct amdgpu_ttm_buffer_entity move_entities[TTM_NUM_MOVE_FENCES];
+       atomic_t next_move_entity;
        u32 num_move_entities;
 
        struct amdgpu_vram_mgr vram_mgr;
-- 
2.43.0
