In S0i3, GFX state is retained, so it is preferable to preempt
queues rather than unmap them, since preemption has lower
overhead.
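
For illustration, the suspend path now takes the lower-overhead route
when the platform is in S0i3 (simplified sketch of the change below;
the resume path mirrors it, using amdgpu_userq_restore_helper() in
place of amdgpu_userq_map_helper()):

	if (adev->in_s0ix)
		/* GFX state survives S0i3, preempting the queue is enough */
		r = amdgpu_userq_preempt_helper(uqm, queue);
	else
		r = amdgpu_userq_unmap_helper(uqm, queue);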

Signed-off-by: Alex Deucher <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 3b43d062b7e0d..af7753bfa27d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -975,7 +975,10 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
                cancel_delayed_work_sync(&uqm->resume_work);
                mutex_lock(&uqm->userq_mutex);
                idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
-                       r = amdgpu_userq_unmap_helper(uqm, queue);
+                       if (adev->in_s0ix)
+                               r = amdgpu_userq_preempt_helper(uqm, queue);
+                       else
+                               r = amdgpu_userq_unmap_helper(uqm, queue);
                        if (r)
                                ret = r;
                }
@@ -1000,7 +1003,10 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
        list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
                mutex_lock(&uqm->userq_mutex);
                idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
-                       r = amdgpu_userq_map_helper(uqm, queue);
+                       if (adev->in_s0ix)
+                               r = amdgpu_userq_restore_helper(uqm, queue);
+                       else
+                               r = amdgpu_userq_map_helper(uqm, queue);
                        if (r)
                                ret = r;
                }
-- 
2.51.0
