Start using scheduler load balancing for userspace compute command submissions.
Signed-off-by: Christian König <christian.koe...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 59046f68975a..0fdccc41af72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -49,7 +49,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			   struct amdgpu_ctx *ctx)
 {
 	struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS];
-	unsigned i, j, num_sdma_rqs;
+	struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS];
+	unsigned i, j, num_sdma_rqs, num_comp_rqs;
 	int r;
 
 	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -82,6 +83,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
 	num_sdma_rqs = 0;
+	num_comp_rqs = 0;
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		struct drm_sched_rq *rq;
@@ -89,6 +91,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 		rq = &ring->sched.sched_rq[priority];
 		if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA)
 			sdma_rqs[num_sdma_rqs++] = rq;
+		else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+			comp_rqs[num_comp_rqs++] = rq;
 	}
 
 	/* create context entity for each ring */
@@ -102,6 +106,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			r = drm_sched_entity_init(&ctx->rings[i].entity,
 						  sdma_rqs, num_sdma_rqs,
 						  &ctx->guilty);
+		} else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+			r = drm_sched_entity_init(&ctx->rings[i].entity,
+						  comp_rqs, num_comp_rqs,
+						  &ctx->guilty);
 		} else {
 			struct drm_sched_rq *rq;
 
-- 
2.14.1
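
For context, this is roughly the pattern the patch applies to compute rings: gather the run queue of every compute ring at the requested priority into one list and hand the whole list to drm_sched_entity_init(), so the GPU scheduler can balance jobs across those rings instead of being pinned to a single one. The sketch below only reuses identifiers visible in the diff above; the helper name example_init_compute_entity() and the enum drm_sched_priority parameter type are assumptions made for the sake of a self-contained snippet, not part of the patch.

/*
 * Illustrative sketch only, not part of the patch. It mirrors the
 * pattern used in amdgpu_ctx_init(): collect the run queues of all
 * rings of one type at the requested priority and pass the full list
 * to the entity, letting the scheduler pick the least loaded ring.
 */
static int example_init_compute_entity(struct amdgpu_device *adev,
				       struct amdgpu_ctx *ctx,
				       enum drm_sched_priority priority,
				       unsigned int ring_idx)
{
	struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS];
	unsigned int i, num_comp_rqs = 0;

	/* Collect the run queue of every compute ring at this priority. */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			comp_rqs[num_comp_rqs++] = &ring->sched.sched_rq[priority];
	}

	/*
	 * All compute run queues are handed over at once; the scheduler
	 * balances the entity across them as jobs are pushed.
	 */
	return drm_sched_entity_init(&ctx->rings[ring_idx].entity,
				     comp_rqs, num_comp_rqs, &ctx->guilty);
}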