No functional change for now, as we always allocate a single entity and use it everywhere.
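For illustration only (not part of this patch): once more than one clear
entity is allocated, a follow-up could pick between them with a small
helper along these lines; the helper name and round-robin policy here are
hypothetical:

	/* Hypothetical: spread work across the allocated clear entities. */
	static struct amdgpu_ttm_buffer_entity *
	amdgpu_ttm_pick_clear_entity(struct amdgpu_device *adev, u32 idx)
	{
		/* With num_clear_entities == 1 this always returns entity 0. */
		return &adev->mman.clear_entities[idx % adev->mman.num_clear_entities];
	}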
---
v4: stop using adev->sdma.num_instances
---
Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 55 +++++++++++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |  3 +-
 3 files changed, 42 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1fb956400696..bf98be8fd007 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1325,7 +1325,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (r)
 		goto out;
 
-	r = amdgpu_fill_buffer(&adev->mman.clear_entity, abo, 0, &bo->base._resv,
+	r = amdgpu_fill_buffer(&adev->mman.clear_entities[0], abo, 0, &bo->base._resv,
 			       &fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
 	if (WARN_ON(r))
 		goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0977a10679dc..71316b3d4a29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2346,8 +2346,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 {
 	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+	u32 num_clear_entities;
 	uint64_t size;
-	int r;
+	int r, i, j;
 
 	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
 	    adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
@@ -2362,6 +2363,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
 
+		num_clear_entities = 1;
 		ring = adev->mman.buffer_funcs_ring;
 		sched = &ring->sched;
 		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
@@ -2374,14 +2376,28 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
 
-		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
-						  &adev->mman.clear_entity,
-						  DRM_SCHED_PRIORITY_NORMAL,
-						  &sched, 1, 1);
-		if (r < 0) {
-			dev_err(adev->dev,
-				"Failed setting up TTM BO clear entity (%d)\n", r);
+		adev->mman.clear_entities = kcalloc(num_clear_entities,
+						    sizeof(struct amdgpu_ttm_buffer_entity),
+						    GFP_KERNEL);
+		if (!adev->mman.clear_entities)
 			goto error_free_default_entity;
+
+		adev->mman.num_clear_entities = num_clear_entities;
+
+		for (i = 0; i < num_clear_entities; i++) {
+			r = amdgpu_ttm_buffer_entity_init(
+				&adev->mman.gtt_mgr, &adev->mman.clear_entities[i],
+				DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 1);
+
+			if (r < 0) {
+				for (j = 0; j < i; j++)
+					amdgpu_ttm_buffer_entity_fini(
+						&adev->mman.gtt_mgr, &adev->mman.clear_entities[j]);
+				kfree(adev->mman.clear_entities);
+				adev->mman.num_clear_entities = 0;
+				adev->mman.clear_entities = NULL;
+				goto error_free_default_entity;
+			}
 		}
 
 		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
@@ -2391,19 +2407,23 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 		if (r < 0) {
 			dev_err(adev->dev,
 				"Failed setting up TTM BO move entity (%d)\n", r);
-			goto error_free_clear_entity;
+			goto error_free_clear_entities;
 		}
 	} else {
 		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
 					      &adev->mman.default_entity);
-		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
-					      &adev->mman.clear_entity);
+		for (i = 0; i < adev->mman.num_clear_entities; i++)
+			amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
+						      &adev->mman.clear_entities[i]);
 		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
 					      &adev->mman.move_entity);
 
 		/* Drop all the old fences since re-creating the scheduler entities
 		 * will allocate new contexts. */
 		ttm_resource_manager_cleanup(man);
+		kfree(adev->mman.clear_entities);
+		adev->mman.clear_entities = NULL;
+		adev->mman.num_clear_entities = 0;
 	}
 
 	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
@@ -2416,9 +2436,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
 	return;
 
-error_free_clear_entity:
-	amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
-				      &adev->mman.clear_entity);
+error_free_clear_entities:
+	for (i = 0; i < adev->mman.num_clear_entities; i++)
+		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
+					      &adev->mman.clear_entities[i]);
+	kfree(adev->mman.clear_entities);
+	adev->mman.clear_entities = NULL;
+	adev->mman.num_clear_entities = 0;
 error_free_default_entity:
 	amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
 				      &adev->mman.default_entity);
@@ -2568,8 +2592,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
 	if (!fence)
 		return -EINVAL;
 
-
-	entity = &adev->mman.clear_entity;
+	entity = &adev->mman.clear_entities[0];
 	*fence = dma_fence_get_stub();
 
 	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index bf101215757e..e98d458b8029 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -72,8 +72,9 @@ struct amdgpu_mman {
 
 	/* @default_entity: for workarounds, has no gart windows */
 	struct amdgpu_ttm_buffer_entity default_entity;
-	struct amdgpu_ttm_buffer_entity clear_entity;
 	struct amdgpu_ttm_buffer_entity move_entity;
+	struct amdgpu_ttm_buffer_entity *clear_entities;
+	u32 num_clear_entities;
 
 	struct amdgpu_vram_mgr vram_mgr;
 	struct amdgpu_gtt_mgr gtt_mgr;
-- 
2.43.0
