With this change we now have as many clear and move entities as we
have sdma engines (limited to TTM_NUM_MOVE_FENCES).

To enable load-balancing, this patch gives all entities access to all
sdma schedulers.
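
For reference, the load-balancing comes from drm_sched itself: an entity
initialized with a list of schedulers (rather than a single one) lets the
DRM scheduler pick the least-loaded ring for each job. Below is a minimal
illustrative sketch of that interface; the helper name and parameter names
are made up for the example, while in the patch the list is
adev->mman.buffer_funcs_scheds with num_buffer_funcs_scheds entries.

#include <drm/gpu_scheduler.h>

/*
 * Illustrative sketch, not part of this patch: initializing an entity
 * with several schedulers allows drm_sched to load-balance jobs
 * submitted on that entity across all of them.
 */
static int example_entity_init(struct drm_sched_entity *entity,
			       struct drm_gpu_scheduler **scheds,
			       unsigned int num_scheds)
{
	/* Passing the full scheduler list (instead of a single sched)
	 * is what enables per-job load balancing for this entity.
	 */
	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     scheds, num_scheds, NULL);
}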

Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
Reviewed-by: Christian König <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b233bcc61ec0..f4304f061d7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2359,8 +2359,6 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                return;
 
        if (enable) {
-               struct drm_gpu_scheduler *sched;
-
                if (!adev->mman.num_buffer_funcs_scheds) {
                        dev_warn(adev->dev, "Not enabling DMA transfers for in kernel use");
                        return;
@@ -2368,11 +2366,10 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
                num_clear_entities = MIN(adev->mman.num_buffer_funcs_scheds, TTM_NUM_MOVE_FENCES);
                num_move_entities = MIN(adev->mman.num_buffer_funcs_scheds, TTM_NUM_MOVE_FENCES);
-               sched = adev->mman.buffer_funcs_scheds[0];
                r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
                                                  &adev->mman.default_entity,
                                                  DRM_SCHED_PRIORITY_KERNEL,
-                                                 &sched, 1, 0);
+                                                 adev->mman.buffer_funcs_scheds, 1, 0);
                if (r < 0) {
                        dev_err(adev->dev,
                                "Failed setting up TTM entity (%d)\n", r);
@@ -2390,8 +2387,11 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
                for (i = 0; i < num_clear_entities; i++) {
                        r = amdgpu_ttm_buffer_entity_init(
-                               &adev->mman.gtt_mgr, &adev->mman.clear_entities[i],
-                               DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 1);
+                               &adev->mman.gtt_mgr,
+                               &adev->mman.clear_entities[i],
+                               DRM_SCHED_PRIORITY_NORMAL,
+                               adev->mman.buffer_funcs_scheds,
+                               adev->mman.num_buffer_funcs_scheds, 1);
 
                        if (r < 0) {
                                for (j = 0; j < i; j++)
@@ -2410,7 +2410,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                        r = amdgpu_ttm_buffer_entity_init(
                                &adev->mman.gtt_mgr,
                                &adev->mman.move_entities[i],
-                               DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 2);
+                               DRM_SCHED_PRIORITY_NORMAL,
+                               adev->mman.buffer_funcs_scheds,
+                               adev->mman.num_buffer_funcs_scheds, 2);
 
                        if (r < 0) {
                                for (j = 0; j < i; j++)
-- 
2.43.0
