If multiple userq IDRs are in use and there is an error handling one
at suspend or resume, it will be silently discarded.
Switch the suspend()/resume() code to use guards and return immediately
on error.

Signed-off-by: Mario Limonciello <[email protected]>
---
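For reference, a minimal sketch of the scoped guard() helper from
<linux/cleanup.h> that this patch relies on. The struct and function
below (example_state, example_update) are hypothetical, for
illustration only:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	struct example_state {
		struct mutex lock;
		bool ready;
		int count;
	};

	/*
	 * guard(mutex)(&lock) acquires the mutex and releases it
	 * automatically when the enclosing scope is left, so every
	 * early return unlocks without an explicit mutex_unlock().
	 */
	static int example_update(struct example_state *st)
	{
		guard(mutex)(&st->lock);	/* held until return */

		if (!st->ready)
			return -EINVAL;		/* lock dropped here */

		st->count++;
		return 0;			/* ...and here */
	}
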
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 25 ++++++++++-------------
 1 file changed, 11 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 3bbe1001fda1..1e9358c70f26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -967,27 +967,25 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
        struct amdgpu_usermode_queue *queue;
        struct amdgpu_userq_mgr *uqm, *tmp;
        int queue_id;
-       int ret = 0, r;
+       int r;
 
        if (!ip_mask)
                return 0;
 
-       mutex_lock(&adev->userq_mutex);
+       guard(mutex)(&adev->userq_mutex);
        list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
                cancel_delayed_work_sync(&uqm->resume_work);
-               mutex_lock(&uqm->userq_mutex);
+               guard(mutex)(&uqm->userq_mutex);
                idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
                        if (adev->in_s0ix)
                                r = amdgpu_userq_preempt_helper(uqm, queue);
                        else
                                r = amdgpu_userq_unmap_helper(uqm, queue);
                        if (r)
-                               ret = r;
+                               return r;
                }
-               mutex_unlock(&uqm->userq_mutex);
        }
-       mutex_unlock(&adev->userq_mutex);
-       return ret;
+       return 0;
 }
 
 int amdgpu_userq_resume(struct amdgpu_device *adev)
@@ -996,26 +994,25 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
        struct amdgpu_usermode_queue *queue;
        struct amdgpu_userq_mgr *uqm, *tmp;
        int queue_id;
-       int ret = 0, r;
+       int r;
 
        if (!ip_mask)
                return 0;
 
-       mutex_lock(&adev->userq_mutex);
+       guard(mutex)(&adev->userq_mutex);
        list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
-               mutex_lock(&uqm->userq_mutex);
+               guard(mutex)(&uqm->userq_mutex);
                idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
                        if (adev->in_s0ix)
                                r = amdgpu_userq_restore_helper(uqm, queue);
                        else
                                r = amdgpu_userq_map_helper(uqm, queue);
                        if (r)
-                               ret = r;
+                               return r;
                }
-               mutex_unlock(&uqm->userq_mutex);
        }
-       mutex_unlock(&adev->userq_mutex);
-       return ret;
+
+       return 0;
 }
 
 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
-- 
2.51.0
