From: Leo Li <sunpeng...@amd.com>

[Why]

dc_exit_ips_for_hw_access() is equivalent to
dc_allow_idle_optimizations(dc, false), but with an additional check on
whether IPS is supported by the ASIC.

[How]

Let's also pipe it through dm_allow_idle_optimizations(), the dm wrapper
introduced by the previous change, adding an explicit dc->caps.ips_support
check at each call site.

No functional changes are intended.

Signed-off-by: Leo Li <sunpeng...@amd.com>
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31 ++++++++++++-------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2efa9f6e23015..c99cff3650f14 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3092,7 +3092,7 @@ static int dm_resume(void *handle)
        struct dc_commit_streams_params commit_params = {};
 
        if (dm->dc->caps.ips_support) {
-               dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
+               dm_allow_idle_optimizations(dm->dc, false);
        }
 
        if (amdgpu_in_reset(adev)) {
@@ -3141,7 +3141,8 @@ static int dm_resume(void *handle)
 
                commit_params.streams = dc_state->streams;
                commit_params.stream_count = dc_state->stream_count;
-               dc_exit_ips_for_hw_access(dm->dc);
+               if (dm->dc->caps.ips_support)
+                       dm_allow_idle_optimizations(dm->dc, false);
                WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
 
                dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -3214,7 +3215,8 @@ static int dm_resume(void *handle)
                        emulated_link_detect(aconnector->dc_link);
                } else {
                        mutex_lock(&dm->dc_lock);
-                       dc_exit_ips_for_hw_access(dm->dc);
+                       if (dm->dc->caps.ips_support)
+                               dm_allow_idle_optimizations(dm->dc, false);
                        dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
                        mutex_unlock(&dm->dc_lock);
                }
@@ -3589,7 +3591,8 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
                        drm_kms_helper_connector_hotplug_event(connector);
        } else {
                mutex_lock(&adev->dm.dc_lock);
-               dc_exit_ips_for_hw_access(dc);
+               if (dc->caps.ips_support)
+                       dm_allow_idle_optimizations(dc, false);
                ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
                mutex_unlock(&adev->dm.dc_lock);
                if (ret) {
@@ -3739,7 +3742,8 @@ static void handle_hpd_rx_irq(void *param)
                        bool ret = false;
 
                        mutex_lock(&adev->dm.dc_lock);
-                       dc_exit_ips_for_hw_access(dc);
+                       if (dc->caps.ips_support)
+                               dm_allow_idle_optimizations(dc, false);
                        ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
                        mutex_unlock(&adev->dm.dc_lock);
 
@@ -4946,7 +4950,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                        bool ret = false;
 
                        mutex_lock(&dm->dc_lock);
-                       dc_exit_ips_for_hw_access(dm->dc);
+                       if (dm->dc->caps.ips_support)
+                               dm_allow_idle_optimizations(dm->dc, false);
                        ret = dc_link_detect(link, DETECT_REASON_BOOT);
                        mutex_unlock(&dm->dc_lock);
 
@@ -9349,7 +9354,8 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
 
                        memset(&position, 0, sizeof(position));
                        mutex_lock(&dm->dc_lock);
-                       dc_exit_ips_for_hw_access(dm->dc);
+                       if (dm->dc->caps.ips_support)
+                               dm_allow_idle_optimizations(dm->dc, false);
                        dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position);
                        mutex_unlock(&dm->dc_lock);
                }
@@ -9424,7 +9430,8 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
 
        dm_enable_per_frame_crtc_master_sync(dc_state);
        mutex_lock(&dm->dc_lock);
-       dc_exit_ips_for_hw_access(dm->dc);
+       if (dm->dc->caps.ips_support)
+               dm_allow_idle_optimizations(dm->dc, false);
        WARN_ON(!dc_commit_streams(dm->dc, &params));
 
        /* Allow idle optimization when vblank count is 0 for display off */
@@ -9793,7 +9800,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                     sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
 
                mutex_lock(&dm->dc_lock);
-               dc_exit_ips_for_hw_access(dm->dc);
+               if (dm->dc->caps.ips_support)
+                       dm_allow_idle_optimizations(dm->dc, false);
                dc_update_planes_and_stream(dm->dc,
                                            dummy_updates,
                                            status->plane_count,
@@ -12159,8 +12167,9 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
 
 static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
 {
-       if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
-               dc_exit_ips_for_hw_access(dc);
+       if (dc->ctx->dmub_srv && dc->caps.ips_support &&
+           !dc->ctx->dmub_srv->idle_exit_counter)
+               dm_allow_idle_optimizations(dc, false);
 }
 
 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
-- 
2.46.0

Reply via email to