The CSA and EOP buffers have different alignment requirements. Replace the hardcoded sizes with proper queries to the IPs.
Signed-off-by: Alex Deucher <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index ab392de2a2388..17a386969b663 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -281,6 +281,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 
 	if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
 		struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
+		struct amdgpu_gfx_shadow_info shadow_info;
+
+		adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
 
 		if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
 			DRM_ERROR("Invalid compute IP MQD size\n");
@@ -296,7 +299,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 		}
 
 		r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
-						   2048);
+						   shadow_info.eop_size);
 		if (r)
 			goto free_mqd;
 
@@ -343,6 +346,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 		kfree(mqd_gfx_v11);
 	} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
 		struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
+		struct amdgpu_sdma_csa_info csa;
+
+		adev->sdma.get_csa_info(adev, &csa);
 
 		if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
 			DRM_ERROR("Invalid SDMA MQD\n");
@@ -357,7 +363,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 			goto free_mqd;
 		}
 
 		r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
-						   32);
+						   csa.size);
 		if (r)
 			goto free_mqd;
-- 
2.51.0
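
For context on the pattern being applied, below is a minimal, self-contained C sketch of "query the IP block for its buffer requirements instead of hardcoding them at the call site". It is a user-space illustration only: the names (buf_req, ip_funcs, dummy_*) and the alignment values are hypothetical stand-ins and do not correspond to the actual amdgpu structures or callbacks touched by this patch.

/*
 * Standalone sketch (not kernel code) of the pattern this patch applies:
 * instead of validating user VAs against hardcoded sizes, each IP block
 * exposes a query callback that reports its own buffer requirements.
 * All names below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct buf_req {
	uint32_t size;      /* required backing size in bytes */
	uint32_t alignment; /* required start alignment in bytes */
};

/* Per-IP callback table, analogous in spirit to adev->gfx.funcs. */
struct ip_funcs {
	void (*get_eop_req)(struct buf_req *req);
	void (*get_csa_req)(struct buf_req *req);
};

/* One hypothetical IP generation reporting the sizes that used to be
 * hardcoded at the call sites (2048 for EOP, 32 for the SDMA CSA);
 * the alignment values here are purely illustrative placeholders. */
static void dummy_get_eop_req(struct buf_req *req)
{
	req->size = 2048;
	req->alignment = 256;
}

static void dummy_get_csa_req(struct buf_req *req)
{
	req->size = 32;
	req->alignment = 32;
}

static const struct ip_funcs dummy_ip = {
	.get_eop_req = dummy_get_eop_req,
	.get_csa_req = dummy_get_csa_req,
};

/* Caller-side check: use the queried requirements, not magic numbers. */
static int validate_va(uint64_t va, const struct buf_req *req)
{
	if (va & (req->alignment - 1))
		return -1; /* misaligned for this IP */
	/* A full check would also verify the mapping covers req->size. */
	return 0;
}

int main(void)
{
	struct buf_req eop, csa;

	dummy_ip.get_eop_req(&eop);
	dummy_ip.get_csa_req(&csa);

	printf("eop va 0x10000 ok: %d\n", validate_va(0x10000, &eop) == 0);
	printf("csa va 0x2004  ok: %d\n", validate_va(0x2004, &csa) == 0);
	return 0;
}

The callback-table shape mirrors how per-generation differences are typically hidden behind function pointers, so call sites stay free of hardware-specific constants.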
