The functions dcn30_calculate_wm_and_dlg and dcn30_calculate_wm_and_dlg_fp
require access to FPU operations; for this reason, this commit moves both
functions to the fpu_operation directory and renames the exported entry
point to dcn3x_calculate_wm_and_dlg, which the dcn30, dcn301, and dcn302
resource pools now use.
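
For context, the split follows the pattern described in the comment carried
over with the code: the FP-heavy helper stays static (and noinline) inside
the fpu_operation file, and the exported wrapper is the only place that
toggles the FPU context. A simplified sketch, using the names from the diff
below (the real helper body is the watermark/DLG math moved unchanged):

	/* fpu_operation/dcn3x_commons.c, simplified */
	static noinline void _dcn30_calculate_wm_and_dlg_fp(struct dc *dc,
			struct dc_state *context,
			display_e2e_pipe_params_st *pipes,
			int pipe_cnt, int vlevel)
	{
		/* floating-point watermark and DLG calculations */
	}

	void dcn3x_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
					display_e2e_pipe_params_st *pipes,
					int pipe_cnt, int vlevel)
	{
		DC_FP_START();
		_dcn30_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
		DC_FP_END();
	}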

Signed-off-by: Rodrigo Siqueira <rodrigo.sique...@amd.com>
---
 .../drm/amd/display/dc/dcn30/dcn30_resource.c | 181 +-----------------
 .../drm/amd/display/dc/dcn30/dcn30_resource.h |   5 -
 .../amd/display/dc/dcn301/dcn301_resource.c   |   2 +-
 .../amd/display/dc/dcn302/dcn302_resource.c   |   2 +-
 .../display/dc/fpu_operation/dcn3x_commons.c  | 177 +++++++++++++++++
 .../display/dc/fpu_operation/dcn3x_commons.h  |   3 +
 6 files changed, 183 insertions(+), 187 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index c97533b4ad09..4edebee00095 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1938,185 +1938,6 @@ static noinline bool dcn30_internal_validate_bw(
        return out;
 }
 
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_calculate_wm_and_dlg_fp(
-               struct dc *dc, struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt,
-               int vlevel)
-{
-       int i, pipe_idx;
-       double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
-       bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
-                       dm_dram_clock_change_unsupported;
-
-       if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
-               dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
-
-       pipes[0].clks_cfg.voltage = vlevel;
-       pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-       pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
-       /* Set B:
-        * DCFCLK: 1GHz or min required above 1GHz
-        * FCLK/UCLK: Max
-        */
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
-               if (vlevel == 0) {
-                       pipes[0].clks_cfg.voltage = 1;
-                       pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
-               }
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
-       }
-       context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
-       pipes[0].clks_cfg.voltage = vlevel;
-       pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
-       /* Set D:
-        * DCFCLK: Min Required
-        * FCLK(proportional to UCLK): 1GHz or Max
-        * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
-        */
-       /*
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
-       }
-       context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       */
-
-       /* Set C:
-        * DCFCLK: Min Required
-        * FCLK(proportional to UCLK): 1GHz or Max
-        * pstate latency overridden to 5us
-        */
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
-               unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
-               unsigned int min_dram_speed_mts_margin = 160;
-
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
-
-               if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
-                       min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
-
-               for (i = 3; i > 0; i--) {
-                       if ((min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) &&
-                                       (min_dram_speed_mts - min_dram_speed_mts_margin < dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts))
-                               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
-               }
-
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
-       }
-       context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
-       if (!pstate_en) {
-               /* The only difference between A and C is p-state latency, if p-state is not supported we want to
-                * calculate DLG based on dummy p-state latency, and max out the set A p-state watermark
-                */
-               context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
-               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
-       } else {
-               /* Set A:
-                * DCFCLK: Min Required
-                * FCLK(proportional to UCLK): 1GHz or Max
-                *
-                * Set A calculated last so that following calculations are based on Set A
-                */
-               if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
-                       context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-                       context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
-                       context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
-               }
-               context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       }
-
-       context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
-
-       /* Make set D = set A until set D is enabled */
-       context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
-
-       for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
-               if (!context->res_ctx.pipe_ctx[i].stream)
-                       continue;
-
-               pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
-               pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
-               if (dc->config.forced_clocks) {
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
-               }
-               if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
-               if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
-               pipe_idx++;
-       }
-
-       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-
-       if (!pstate_en)
-               /* Restore full p-state latency */
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us =
-                               dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-}
-
-void dcn30_calculate_wm_and_dlg(
-               struct dc *dc, struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt,
-               int vlevel)
-{
-       DC_FP_START();
-       dcn30_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
-       DC_FP_END();
-}
-
 bool dcn30_validate_bandwidth(struct dc *dc,
                struct dc_state *context,
                bool fast_validate)
@@ -2317,7 +2138,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
        .link_enc_create = dcn30_link_encoder_create,
        .panel_cntl_create = dcn30_panel_cntl_create,
        .validate_bandwidth = dcn30_validate_bandwidth,
-       .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+       .calculate_wm_and_dlg = dcn3x_calculate_wm_and_dlg,
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index 42960574cce9..694ae4e4e88e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -44,11 +44,6 @@ struct resource_pool *dcn30_create_resource_pool(
 
 bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
                bool fast_validate);
-void dcn30_calculate_wm_and_dlg(
-               struct dc *dc, struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt,
-               int vlevel);
 
 int dcn30_populate_dml_pipes_from_context(
        struct dc *dc, struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 8369b009a853..934df194db38 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1625,7 +1625,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
        .link_enc_create = dcn301_link_encoder_create,
        .panel_cntl_create = dcn301_panel_cntl_create,
        .validate_bandwidth = dcn30_validate_bandwidth,
-       .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+       .calculate_wm_and_dlg = dcn3x_calculate_wm_and_dlg,
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 0554a3dd3214..539757ec3348 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -1235,7 +1235,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
                .link_enc_create = dcn302_link_encoder_create,
                .panel_cntl_create = dcn302_panel_cntl_create,
                .validate_bandwidth = dcn30_validate_bandwidth,
-               .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+               .calculate_wm_and_dlg = dcn3x_calculate_wm_and_dlg,
                .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
                .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
                .add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/fpu_operation/dcn3x_commons.c b/drivers/gpu/drm/amd/display/dc/fpu_operation/dcn3x_commons.c
index 06e9bd6d4d28..c3b06a8cf765 100644
--- a/drivers/gpu/drm/amd/display/dc/fpu_operation/dcn3x_commons.c
+++ b/drivers/gpu/drm/amd/display/dc/fpu_operation/dcn3x_commons.c
@@ -42,6 +42,7 @@
  *   kernel_fpu_begin/end macros.
  */
 
+#include "clk_mgr.h"
 #include "resource.h"
 #include "dml/dcn30/display_mode_vba_30.h"
 
@@ -213,6 +214,182 @@ static void _dcn3x_set_mcif_arb_params(struct dc *dc, struct dc_state *context,
 
 }
 
+/*
+ * This must be noinline to ensure anything that deals with FP registers
+ * is contained within this call; previously our compiling with hard-float
+ * would result in fp instructions being emitted outside of the boundaries
+ * of the DC_FP_START/END macros, which makes sense as the compiler has no
+ * idea about what is wrapped and what is not
+ *
+ * This is largely just a workaround to avoid breakage introduced with 5.6,
+ * ideally all fp-using code should be moved into its own file, only that
+ * should be compiled with hard-float, and all code exported from there
+ * should be strictly wrapped with DC_FP_START/END
+ */
+static noinline void _dcn30_calculate_wm_and_dlg_fp(struct dc *dc,
+               struct dc_state *context, display_e2e_pipe_params_st *pipes,
+               int pipe_cnt, int vlevel)
+{
+       int i, pipe_idx;
+       double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+       bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+                       dm_dram_clock_change_unsupported;
+
+       if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
+               dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+
+       pipes[0].clks_cfg.voltage = vlevel;
+       pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+       pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+       /* Set B:
+        * DCFCLK: 1GHz or min required above 1GHz
+        * FCLK/UCLK: Max
+        */
+       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+               if (vlevel == 0) {
+                       pipes[0].clks_cfg.voltage = 1;
+                       pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+               }
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+       }
+       context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+       pipes[0].clks_cfg.voltage = vlevel;
+       pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+
+       /* Set D:
+        * DCFCLK: Min Required
+        * FCLK(proportional to UCLK): 1GHz or Max
+        * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
+        */
+       /*
+       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+       }
+       context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       */
+
+       /* Set C:
+        * DCFCLK: Min Required
+        * FCLK(proportional to UCLK): 1GHz or Max
+        * pstate latency overridden to 5us
+        */
+       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+               unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+               unsigned int min_dram_speed_mts_margin = 160;
+
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
+
+               if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
+                       min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+               for (i = 3; i > 0; i--) {
+                       if ((min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) &&
+                                       (min_dram_speed_mts - min_dram_speed_mts_margin < dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts))
+                               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+               }
+
+               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+       }
+       context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+       if (!pstate_en) {
+               /* The only difference between A and C is p-state latency, if p-state is not supported we want to
+                * calculate DLG based on dummy p-state latency, and max out the set A p-state watermark
+                */
+               context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+       } else {
+               /* Set A:
+                * DCFCLK: Min Required
+                * FCLK(proportional to UCLK): 1GHz or Max
+                *
+                * Set A calculated last so that following calculations are based on Set A
+                */
+               if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+                       context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+                       context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+                       context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+               }
+               context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       }
+
+       context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+       /* Make set D = set A until set D is enabled */
+       context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
+
+       for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+               if (!context->res_ctx.pipe_ctx[i].stream)
+                       continue;
+
+               pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+               pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+               if (dc->config.forced_clocks) {
+                       pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+                       pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+               }
+               if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+                       pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+               if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+                       pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+               pipe_idx++;
+       }
+
+       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
+       if (!pstate_en)
+               /* Restore full p-state latency */
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+                               dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+}
+
+void dcn3x_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
+                               display_e2e_pipe_params_st *pipes,
+                               int pipe_cnt,
+                               int vlevel)
+{
+       DC_FP_START();
+       _dcn30_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+       DC_FP_END();
+}
+
 void dcn3x_set_mcif_arb_params(struct dc *dc, struct dc_state *context,
                display_e2e_pipe_params_st *pipes,
                int pipe_cnt)
diff --git a/drivers/gpu/drm/amd/display/dc/fpu_operation/dcn3x_commons.h b/drivers/gpu/drm/amd/display/dc/fpu_operation/dcn3x_commons.h
index b3b6d8a66c28..7521498bbd3b 100644
--- a/drivers/gpu/drm/amd/display/dc/fpu_operation/dcn3x_commons.h
+++ b/drivers/gpu/drm/amd/display/dc/fpu_operation/dcn3x_commons.h
@@ -32,4 +32,7 @@ void dcn3x_populate_dml_writeback_from_context(struct dc *dc,
 void dcn3x_set_mcif_arb_params(struct dc *dc, struct dc_state *context,
        display_e2e_pipe_params_st *pipes, int pipe_cnt);
 
+void dcn3x_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
+       display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel);
+
 #endif /* _DCN3X_COMMONS_H_ */
-- 
2.25.1
