It avoids duplicated code and makes it possible to output a warning.

---
v4: move check inside the existing if (enable) test
---

Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 13 ++++---------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  5 +++++
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 362ab2b34498..98aead91b98b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3158,9 +3158,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device 
*adev)
        if (r)
                goto init_failed;
 
-       if (adev->mman.buffer_funcs_ring &&
-           adev->mman.buffer_funcs_ring->sched.ready)
-               amdgpu_ttm_set_buffer_funcs_status(adev, true);
+       amdgpu_ttm_set_buffer_funcs_status(adev, true);
 
        /* Don't init kfd if whole hive need to be reset during init */
        if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
@@ -4052,8 +4050,7 @@ static int amdgpu_device_ip_resume(struct amdgpu_device 
*adev)
 
        r = amdgpu_device_ip_resume_phase2(adev);
 
-       if (adev->mman.buffer_funcs_ring->sched.ready)
-               amdgpu_ttm_set_buffer_funcs_status(adev, true);
+       amdgpu_ttm_set_buffer_funcs_status(adev, true);
 
        if (r)
                return r;
@@ -5199,8 +5196,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool 
notify_clients)
        return 0;
 
 unwind_evict:
-       if (adev->mman.buffer_funcs_ring->sched.ready)
-               amdgpu_ttm_set_buffer_funcs_status(adev, true);
+       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        amdgpu_fence_driver_hw_init(adev);
 
 unwind_userq:
@@ -5931,8 +5927,7 @@ int amdgpu_device_reinit_after_reset(struct 
amdgpu_reset_context *reset_context)
                                if (r)
                                        goto out;
 
-                               if 
(tmp_adev->mman.buffer_funcs_ring->sched.ready)
-                                       
amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
+                               amdgpu_ttm_set_buffer_funcs_status(tmp_adev, 
true);
 
                                r = amdgpu_device_ip_resume_phase3(tmp_adev);
                                if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7006c58a6992..a441de678a83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2380,6 +2380,11 @@ void amdgpu_ttm_set_buffer_funcs_status(struct 
amdgpu_device *adev, bool enable)
                struct amdgpu_ring *ring;
                struct drm_gpu_scheduler *sched;
 
+               if (!adev->mman.buffer_funcs_ring || 
!adev->mman.buffer_funcs_ring->sched.ready) {
+                       dev_warn(adev->dev, "Not enabling DMA transfers for in 
kernel use");
+                       return;
+               }
+
                ring = adev->mman.buffer_funcs_ring;
                sched = &ring->sched;
                r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
-- 
2.43.0

Reply via email to