From: Likun Gao <likun....@amd.com>

Abstract amdgpu_gfx_rlc_enter/exit_safe_mode, amdgpu_gfx_rlc_fini and the
common parts of rlc_init into shared helpers to improve the reusability of
the RLC code across ASICs.
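
For reference, a rough sketch of the resulting call flow (illustrative only;
the converted call sites are in the hunks below). The common wrappers perform
the in_safe_mode/is_rlc_enabled()/cg_flags checks once, and only the register
programming stays ASIC specific:

    /* e.g. in a clock gating or DIDT routine */
    amdgpu_gfx_rlc_enter_safe_mode(adev);   /* calls rlc.funcs->set_safe_mode() */
    /* ... program CG/PG/DIDT registers ... */
    amdgpu_gfx_rlc_exit_safe_mode(adev);    /* calls rlc.funcs->unset_safe_mode() */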

Signed-off-by: Likun Gao <likun....@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c            | 201 ++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h            |  16 +-
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |   6 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c              |  36 +---
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c              | 158 +++-------------
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c              | 210 ++++++---------------
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c              | 197 +++++--------------
 drivers/gpu/drm/amd/amdgpu/kv_dpm.c                |   6 +-
 .../gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c   |  12 +-
 .../gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c |  36 ++--
 10 files changed, 378 insertions(+), 500 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1a656b8..7821768 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -22,6 +22,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
+#include <linux/firmware.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
@@ -412,3 +413,203 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
        mutex_unlock(&adev->gfx.gfx_off_mutex);
 }
+
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
+{
+       if (adev->gfx.rlc.in_safe_mode)
+               return;
+
+       /* if RLC is not enabled, do nothing */
+       if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+               return;
+
+       if (adev->cg_flags &
+           (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+            AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+               adev->gfx.rlc.funcs->set_safe_mode(adev);
+               adev->gfx.rlc.in_safe_mode = true;
+       }
+}
+
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
+{
+       if (!(adev->gfx.rlc.in_safe_mode))
+               return;
+
+       /* if RLC is not enabled, do nothing */
+       if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+               return;
+
+       if (adev->cg_flags &
+           (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+            AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+               adev->gfx.rlc.funcs->unset_safe_mode(adev);
+               adev->gfx.rlc.in_safe_mode = false;
+       }
+}
+
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
+{
+       const u32 *src_ptr;
+       volatile u32 *dst_ptr;
+       u32 i;
+       int r;
+
+       /* allocate save restore block */
+       r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &adev->gfx.rlc.save_restore_obj,
+                                     &adev->gfx.rlc.save_restore_gpu_addr,
+                                     (void **)&adev->gfx.rlc.sr_ptr);
+       if (r) {
+               dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+               amdgpu_gfx_rlc_fini(adev);
+               return r;
+       }
+
+       /* write the sr buffer */
+       src_ptr = adev->gfx.rlc.reg_list;
+       dst_ptr = adev->gfx.rlc.sr_ptr;
+       for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+               dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+       amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+       amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+       return 0;
+}
+
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+{
+       volatile u32 *dst_ptr;
+       u32 dws;
+       int r;
+
+       /* allocate clear state block */
+       adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
+       r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &adev->gfx.rlc.clear_state_obj,
+                                     &adev->gfx.rlc.clear_state_gpu_addr,
+                                     (void **)&adev->gfx.rlc.cs_ptr);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
+               amdgpu_gfx_rlc_fini(adev);
+               return r;
+       }
+
+       /* set up the cs buffer */
+       dst_ptr = adev->gfx.rlc.cs_ptr;
+       adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
+       amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+       amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+       amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+       return 0;
+}
+
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+                                     &adev->gfx.rlc.cp_table_obj,
+                                     &adev->gfx.rlc.cp_table_gpu_addr,
+                                     (void **)&adev->gfx.rlc.cp_table_ptr);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
+               amdgpu_gfx_rlc_fini(adev);
+               return r;
+       }
+
+       /* set up the cp table */
+       amdgpu_gfx_rlc_init_cp_table(adev);
+       amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+       amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+       return 0;
+}
+
+void amdgpu_gfx_rlc_init_cp_table(struct amdgpu_device *adev)
+{
+       const __le32 *fw_data;
+       volatile u32 *dst_ptr;
+       int me, i, max_me;
+       u32 bo_offset = 0;
+       u32 table_offset, table_size;
+
+       max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
+
+       /* write the cp table buffer */
+       dst_ptr = adev->gfx.rlc.cp_table_ptr;
+       for (me = 0; me < max_me; me++) {
+               if (me == 0) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.ce_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 1) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.pfp_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 2) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.me_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 3) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else  if (me == 4) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec2_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               }
+
+               for (i = 0; i < table_size; i ++) {
+                       dst_ptr[bo_offset + i] =
+                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+               }
+
+               bo_offset += table_size;
+       }
+}
+
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
+{
+       /* save restore block */
+       if (adev->gfx.rlc.save_restore_obj) {
+               amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
+                                     &adev->gfx.rlc.save_restore_gpu_addr,
+                                     (void **)&adev->gfx.rlc.sr_ptr);
+       }
+
+       /* clear state block */
+       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+                             &adev->gfx.rlc.clear_state_gpu_addr,
+                             (void **)&adev->gfx.rlc.cs_ptr);
+
+       /* jump table block */
+       amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+                             &adev->gfx.rlc.cp_table_gpu_addr,
+                             (void **)&adev->gfx.rlc.cp_table_ptr);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 0a7c285..0435f66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -39,10 +39,13 @@
 
 
 struct amdgpu_rlc_funcs {
-       void (*enter_safe_mode)(struct amdgpu_device *adev);
-       void (*exit_safe_mode)(struct amdgpu_device *adev);
+       bool (*is_rlc_enabled)(struct amdgpu_device *adev);
+       void (*set_safe_mode)(struct amdgpu_device *adev);
+       void (*unset_safe_mode)(struct amdgpu_device *adev);
        int  (*init)(struct amdgpu_device *adev);
-       void (*fini)(struct amdgpu_device *adev);
+       u32  (*get_csb_size)(struct amdgpu_device *adev);
+       void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+       int  (*get_cp_table_num)(struct amdgpu_device *adev);
        int  (*resume)(struct amdgpu_device *adev);
        void (*stop)(struct amdgpu_device *adev);
        void (*reset)(struct amdgpu_device *adev);
@@ -364,5 +367,12 @@ void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
                                     int pipe, int queue);
 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_init_cp_table(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 79220a9..86e14c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
 
        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
            pi->caps_td_ramping || pi->caps_tcp_ramping) {
-               adev->gfx.rlc.funcs->enter_safe_mode(adev);
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
 
                if (enable) {
                        ret = ci_program_pt_config_registers(adev, didt_config_ci);
                        if (ret) {
-                               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+                               amdgpu_gfx_rlc_exit_safe_mode(adev);
                                return ret;
                        }
                }
 
                ci_do_enable_didt(adev, enable);
 
-               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
        }
 
        return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 2082347..1dc3013 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -2351,18 +2351,11 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, val);
 }
 
-static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
 static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 {
        const u32 *src_ptr;
        volatile u32 *dst_ptr;
-       u32 dws, i;
+       u32 dws;
        u64 reg_list_mc_addr;
        const struct cs_section_def *cs_data;
        int r;
@@ -2377,26 +2370,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
        cs_data = adev->gfx.rlc.cs_data;
 
        if (src_ptr) {
-               /* save restore block */
-               r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                             AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.save_restore_obj,
-                                             &adev->gfx.rlc.save_restore_gpu_addr,
-                                             (void **)&adev->gfx.rlc.sr_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
-                                r);
-                       adev->gfx.rlc.funcs->fini(adev);
+               /* init save restore block */
+               r = amdgpu_gfx_rlc_init_sr(adev, dws);
+               if (r)
                        return r;
-               }
-
-               /* write the sr buffer */
-               dst_ptr = adev->gfx.rlc.sr_ptr;
-               for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-                       dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-
-               amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
        }
 
        if (cs_data) {
@@ -2411,7 +2388,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
                                              (void **)&adev->gfx.rlc.cs_ptr);
                if (r) {
                        dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-                       adev->gfx.rlc.funcs->fini(adev);
+                       amdgpu_gfx_rlc_fini(adev);
                        return r;
                }
 
@@ -3060,7 +3037,6 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
 
 static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
        .init = gfx_v6_0_rlc_init,
-       .fini = gfx_v6_0_rlc_fini,
        .resume = gfx_v6_0_rlc_resume,
        .stop = gfx_v6_0_rlc_stop,
        .reset = gfx_v6_0_rlc_reset,
@@ -3158,7 +3134,7 @@ static int gfx_v6_0_sw_fini(void *handle)
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
-       adev->gfx.rlc.funcs->fini(adev);
+       amdgpu_gfx_rlc_fini(adev);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d8e2ad8..f467b9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
 
 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
 static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 
@@ -3252,18 +3251,10 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
  * The RLC is a multi-purpose microengine that handles a
  * variety of functions.
  */
-static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 {
        const u32 *src_ptr;
-       volatile u32 *dst_ptr;
-       u32 dws, i;
+       u32 dws;
        const struct cs_section_def *cs_data;
        int r;
 
@@ -3290,66 +3281,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
        cs_data = adev->gfx.rlc.cs_data;
 
        if (src_ptr) {
-               /* save restore block */
-               r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                             AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.save_restore_obj,
-                                             &adev->gfx.rlc.save_restore_gpu_addr,
-                                             (void **)&adev->gfx.rlc.sr_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
-                       adev->gfx.rlc.funcs->fini(adev);
+               /* init save restore block */
+               r = amdgpu_gfx_rlc_init_sr(adev, dws);
+               if (r)
                        return r;
-               }
-
-               /* write the sr buffer */
-               dst_ptr = adev->gfx.rlc.sr_ptr;
-               for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-                       dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-               amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
        }
 
        if (cs_data) {
-               /* clear state block */
-               adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
-
-               r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                             AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.clear_state_obj,
-                                             &adev->gfx.rlc.clear_state_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cs_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-                       adev->gfx.rlc.funcs->fini(adev);
+               /* init clear state block */
+               r = amdgpu_gfx_rlc_init_csb(adev);
+               if (r)
                        return r;
-               }
-
-               /* set up the cs buffer */
-               dst_ptr = adev->gfx.rlc.cs_ptr;
-               gfx_v7_0_get_csb_buffer(adev, dst_ptr);
-               amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
 
        if (adev->gfx.rlc.cp_table_size) {
-
-               r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.cp_table_obj,
-                                             &adev->gfx.rlc.cp_table_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cp_table_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
-                       adev->gfx.rlc.funcs->fini(adev);
+               r = amdgpu_gfx_rlc_init_cpt(adev);
+               if (r)
                        return r;
-               }
-
-               gfx_v7_0_init_cp_pg_table(adev);
-
-               amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
        }
 
        return 0;
@@ -3430,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
        return orig;
 }
 
-static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
+{
+       return true;
+}
+
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
 {
        u32 tmp, i, mask;
 
@@ -3452,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
        }
 }
 
-static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
 {
        u32 tmp;
 
@@ -3768,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
                WREG32(mmRLC_PG_CNTL, data);
 }
 
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
+static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
 {
-       const __le32 *fw_data;
-       volatile u32 *dst_ptr;
-       int me, i, max_me = 4;
-       u32 bo_offset = 0;
-       u32 table_offset, table_size;
-
        if (adev->asic_type == CHIP_KAVERI)
-               max_me = 5;
-
-       if (adev->gfx.rlc.cp_table_ptr == NULL)
-               return;
-
-       /* write the cp table buffer */
-       dst_ptr = adev->gfx.rlc.cp_table_ptr;
-       for (me = 0; me < max_me; me++) {
-               if (me == 0) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.ce_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 1) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.pfp_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 2) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.me_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 3) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec2_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               }
-
-               for (i = 0; i < table_size; i ++) {
-                       dst_ptr[bo_offset + i] =
-                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-               }
-
-               bo_offset += table_size;
-       }
+               return 5;
+       else
+               return 4;
 }
 
 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
@@ -4272,10 +4165,13 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 };
 
 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
-       .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
-       .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode,
+       .is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
+       .set_safe_mode = gfx_v7_0_set_safe_mode,
+       .unset_safe_mode = gfx_v7_0_unset_safe_mode,
        .init = gfx_v7_0_rlc_init,
-       .fini = gfx_v7_0_rlc_fini,
+       .get_csb_size = gfx_v7_0_get_csb_size,
+       .get_csb_buffer = gfx_v7_0_get_csb_buffer,
+       .get_cp_table_num = gfx_v7_0_cp_pg_table_num,
        .resume = gfx_v7_0_rlc_resume,
        .stop = gfx_v7_0_rlc_stop,
        .reset = gfx_v7_0_rlc_reset,
@@ -4594,7 +4490,7 @@ static int gfx_v7_0_sw_fini(void *handle)
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
        gfx_v7_0_cp_compute_fini(adev);
-       adev->gfx.rlc.funcs->fini(adev);
+       amdgpu_gfx_rlc_fini(adev);
        gfx_v7_0_mec_fini(adev);
        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                                &adev->gfx.rlc.clear_state_gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 7dbcb2e..cb066a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1283,81 +1283,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
        buffer[count++] = cpu_to_le32(0);
 }
 
-static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
 {
-       const __le32 *fw_data;
-       volatile u32 *dst_ptr;
-       int me, i, max_me = 4;
-       u32 bo_offset = 0;
-       u32 table_offset, table_size;
-
        if (adev->asic_type == CHIP_CARRIZO)
-               max_me = 5;
-
-       /* write the cp table buffer */
-       dst_ptr = adev->gfx.rlc.cp_table_ptr;
-       for (me = 0; me < max_me; me++) {
-               if (me == 0) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.ce_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 1) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.pfp_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 2) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.me_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 3) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else  if (me == 4) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec2_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               }
-
-               for (i = 0; i < table_size; i ++) {
-                       dst_ptr[bo_offset + i] =
-                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-               }
-
-               bo_offset += table_size;
-       }
-}
-
-static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
+               return 5;
+       else
+               return 4;
 }
 
 static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 {
-       volatile u32 *dst_ptr;
-       u32 dws;
        const struct cs_section_def *cs_data;
        int r;
 
@@ -1366,44 +1301,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
        cs_data = adev->gfx.rlc.cs_data;
 
        if (cs_data) {
-               /* clear state block */
-               adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
-
-               r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                             AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.clear_state_obj,
-                                             &adev->gfx.rlc.clear_state_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cs_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-                       adev->gfx.rlc.funcs->fini(adev);
+               /* init clear state block */
+               r = amdgpu_gfx_rlc_init_csb(adev);
+               if (r)
                        return r;
-               }
-
-               /* set up the cs buffer */
-               dst_ptr = adev->gfx.rlc.cs_ptr;
-               gfx_v8_0_get_csb_buffer(adev, dst_ptr);
-               amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
 
        if ((adev->asic_type == CHIP_CARRIZO) ||
            (adev->asic_type == CHIP_STONEY)) {
                adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-               r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.cp_table_obj,
-                                             &adev->gfx.rlc.cp_table_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cp_table_ptr);
-               if (r) {
-                       dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+               r = amdgpu_gfx_rlc_init_cpt(adev);
+               if (r)
                        return r;
-               }
-
-               cz_init_cp_jump_table(adev);
-
-               amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
        }
 
        return 0;
@@ -2166,7 +2075,7 @@ static int gfx_v8_0_sw_fini(void *handle)
        amdgpu_gfx_kiq_fini(adev);
 
        gfx_v8_0_mec_fini(adev);
-       adev->gfx.rlc.funcs->fini(adev);
+       amdgpu_gfx_rlc_fini(adev);
        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                                &adev->gfx.rlc.clear_state_gpu_addr,
                                (void **)&adev->gfx.rlc.cs_ptr);
@@ -4951,7 +4860,7 @@ static int gfx_v8_0_hw_fini(void *handle)
                pr_debug("For SRIOV client, shouldn't do anything.\n");
                return 0;
        }
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
        if (!gfx_v8_0_wait_for_idle(adev))
                gfx_v8_0_cp_enable(adev, false);
        else
@@ -4960,7 +4869,7 @@ static int gfx_v8_0_hw_fini(void *handle)
                adev->gfx.rlc.funcs->stop(adev);
        else
                pr_err("rlc is busy, skip halt rlc\n");
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
        return 0;
 }
 
@@ -5423,7 +5332,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
                                AMD_PG_SUPPORT_RLC_SMU_HS |
                                AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_GFX_DMG))
-               adev->gfx.rlc.funcs->enter_safe_mode(adev);
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
@@ -5477,7 +5386,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
                                AMD_PG_SUPPORT_RLC_SMU_HS |
                                AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_GFX_DMG))
-               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
        return 0;
 }
 
@@ -5571,57 +5480,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
 #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
 #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
 
-static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
 {
-       u32 data;
-       unsigned i;
+       uint32_t rlc_setting;
 
-       data = RREG32(mmRLC_CNTL);
-       if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-               return;
+       rlc_setting = RREG32(mmRLC_CNTL);
+       if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+               return false;
 
-       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-               data |= RLC_SAFE_MODE__CMD_MASK;
-               data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
-               data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
-               WREG32(mmRLC_SAFE_MODE, data);
+       return true;
+}
 
-               for (i = 0; i < adev->usec_timeout; i++) {
-                       if ((RREG32(mmRLC_GPM_STAT) &
-                            (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
-                             RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
-                           (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
-                            RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
-                               break;
-                       udelay(1);
-               }
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+{
+       uint32_t data;
+       unsigned i;
+       data = RREG32(mmRLC_CNTL);
+       data |= RLC_SAFE_MODE__CMD_MASK;
+       data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+       data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+       WREG32(mmRLC_SAFE_MODE, data);
 
-               for (i = 0; i < adev->usec_timeout; i++) {
-                       if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
-                               break;
-                       udelay(1);
-               }
-               adev->gfx.rlc.in_safe_mode = true;
+       /* wait for RLC_SAFE_MODE */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if ((RREG32(mmRLC_GPM_STAT) &
+                    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+                     RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+                   (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+                    RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+                       break;
+               udelay(1);
+       }
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+                       break;
+               udelay(1);
        }
 }
 
-static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
 {
-       u32 data = 0;
+       uint32_t data;
        unsigned i;
 
        data = RREG32(mmRLC_CNTL);
-       if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-               return;
-
-       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-               if (adev->gfx.rlc.in_safe_mode) {
-                       data |= RLC_SAFE_MODE__CMD_MASK;
-                       data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
-                       WREG32(mmRLC_SAFE_MODE, data);
-                       adev->gfx.rlc.in_safe_mode = false;
-               }
-       }
+       data |= RLC_SAFE_MODE__CMD_MASK;
+       data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+       WREG32(mmRLC_SAFE_MODE, data);
 
        for (i = 0; i < adev->usec_timeout; i++) {
                if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
@@ -5631,10 +5536,13 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
-       .enter_safe_mode = iceland_enter_rlc_safe_mode,
-       .exit_safe_mode = iceland_exit_rlc_safe_mode,
+       .is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
+       .set_safe_mode = gfx_v8_0_set_safe_mode,
+       .unset_safe_mode = gfx_v8_0_unset_safe_mode,
        .init = gfx_v8_0_rlc_init,
-       .fini = gfx_v8_0_rlc_fini,
+       .get_csb_size = gfx_v8_0_get_csb_size,
+       .get_csb_buffer = gfx_v8_0_get_csb_buffer,
+       .get_cp_table_num = gfx_v8_0_cp_jump_table_num,
        .resume = gfx_v8_0_rlc_resume,
        .stop = gfx_v8_0_rlc_stop,
        .reset = gfx_v8_0_rlc_reset,
@@ -5646,7 +5554,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 {
        uint32_t temp, data;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        /* It is disabled by HW by default */
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -5742,7 +5650,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
                gfx_v8_0_wait_for_rlc_serdes(adev);
        }
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5752,7 +5660,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 
        temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
                temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
@@ -5835,7 +5743,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 
        gfx_v8_0_wait_for_rlc_serdes(adev);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
                                            bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index cbfdf49..07ea885 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1050,85 +1050,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
        WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
 }
 
-static void rv_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
 {
-       const __le32 *fw_data;
-       volatile u32 *dst_ptr;
-       int me, i, max_me = 5;
-       u32 bo_offset = 0;
-       u32 table_offset, table_size;
-
-       /* write the cp table buffer */
-       dst_ptr = adev->gfx.rlc.cp_table_ptr;
-       for (me = 0; me < max_me; me++) {
-               if (me == 0) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.ce_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 1) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.pfp_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 2) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.me_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else if (me == 3) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               } else  if (me == 4) {
-                       const struct gfx_firmware_header_v1_0 *hdr =
-                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-                       fw_data = (const __le32 *)
-                               (adev->gfx.mec2_fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       table_offset = le32_to_cpu(hdr->jt_offset);
-                       table_size = le32_to_cpu(hdr->jt_size);
-               }
-
-               for (i = 0; i < table_size; i ++) {
-                       dst_ptr[bo_offset + i] =
-                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-               }
-
-               bo_offset += table_size;
-       }
-}
-
-static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
-{
-       /* clear state block */
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
-                       &adev->gfx.rlc.clear_state_gpu_addr,
-                       (void **)&adev->gfx.rlc.cs_ptr);
-
-       /* jump table block */
-       amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
-                       &adev->gfx.rlc.cp_table_gpu_addr,
-                       (void **)&adev->gfx.rlc.cp_table_ptr);
+       return 5;
 }
 
 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 {
-       volatile u32 *dst_ptr;
-       u32 dws;
        const struct cs_section_def *cs_data;
        int r;
 
@@ -1137,45 +1065,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
        cs_data = adev->gfx.rlc.cs_data;
 
        if (cs_data) {
-               /* clear state block */
-               adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
-               r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                             AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.clear_state_obj,
-                                             &adev->gfx.rlc.clear_state_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cs_ptr);
-               if (r) {
-                       dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
-                               r);
-                       adev->gfx.rlc.funcs->fini(adev);
+               /* init clear state block */
+               r = amdgpu_gfx_rlc_init_csb(adev);
+               if (r)
                        return r;
-               }
-               /* set up the cs buffer */
-               dst_ptr = adev->gfx.rlc.cs_ptr;
-               gfx_v9_0_get_csb_buffer(adev, dst_ptr);
-               amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
 
        if (adev->asic_type == CHIP_RAVEN) {
                /* TODO: double check the cp_table_size for RV */
                adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-               r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                             &adev->gfx.rlc.cp_table_obj,
-                                             &adev->gfx.rlc.cp_table_gpu_addr,
-                                             (void **)&adev->gfx.rlc.cp_table_ptr);
-               if (r) {
-                       dev_err(adev->dev,
-                               "(%d) failed to create cp table bo\n", r);
-                       adev->gfx.rlc.funcs->fini(adev);
+               r = amdgpu_gfx_rlc_init_cpt(adev);
+               if (r)
                        return r;
-               }
-
-               rv_init_cp_jump_table(adev);
-               amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
        }
 
        switch (adev->asic_type) {
@@ -3584,64 +3485,47 @@ static int gfx_v9_0_late_init(void *handle)
        return 0;
 }
 
-static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
 {
-       uint32_t rlc_setting, data;
-       unsigned i;
-
-       if (adev->gfx.rlc.in_safe_mode)
-               return;
+       uint32_t rlc_setting;
 
        /* if RLC is not enabled, do nothing */
        rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
        if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
-               return;
-
-       if (adev->cg_flags &
-           (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
-            AMD_CG_SUPPORT_GFX_3D_CGCG)) {
-               data = RLC_SAFE_MODE__CMD_MASK;
-               data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
-               WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+               return false;
 
-               /* wait for RLC_SAFE_MODE */
-               for (i = 0; i < adev->usec_timeout; i++) {
-                       if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
-                               break;
-                       udelay(1);
-               }
-               adev->gfx.rlc.in_safe_mode = true;
-       }
+       return true;
 }
 
-static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
 {
-       uint32_t rlc_setting, data;
-
-       if (!adev->gfx.rlc.in_safe_mode)
-               return;
+       uint32_t data;
+       unsigned i;
 
-       /* if RLC is not enabled, do nothing */
-       rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
-       if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
-               return;
+       data = RLC_SAFE_MODE__CMD_MASK;
+       data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+       WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
 
-       if (adev->cg_flags &
-           (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-               /*
-                * Try to exit safe mode only if it is already in safe
-                * mode.
-                */
-               data = RLC_SAFE_MODE__CMD_MASK;
-               WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
-               adev->gfx.rlc.in_safe_mode = false;
+       /* wait for RLC_SAFE_MODE */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+                       break;
+               udelay(1);
        }
 }
 
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
+{
+       uint32_t data;
+
+       data = RLC_SAFE_MODE__CMD_MASK;
+       WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+}
+
 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
                                                bool enable)
 {
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
                gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -3652,7 +3536,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
                gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
        }
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -3750,7 +3634,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
 {
        uint32_t data, def;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        /* Enable 3D CGCG/CGLS */
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
@@ -3790,7 +3674,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
                        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
        }
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -3798,7 +3682,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 {
        uint32_t def, data;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
@@ -3838,7 +3722,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
                        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
        }
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -3867,10 +3751,13 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
-       .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
-       .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode,
+       .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
+       .set_safe_mode = gfx_v9_0_set_safe_mode,
+       .unset_safe_mode = gfx_v9_0_unset_safe_mode,
        .init = gfx_v9_0_rlc_init,
-       .fini = gfx_v9_0_rlc_fini,
+       .get_csb_size = gfx_v9_0_get_csb_size,
+       .get_csb_buffer = gfx_v9_0_get_csb_buffer,
+       .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
        .resume = gfx_v9_0_rlc_resume,
        .stop = gfx_v9_0_rlc_stop,
        .reset = gfx_v9_0_rlc_reset,
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index d0e478f..0c9a2c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
            pi->caps_db_ramping ||
            pi->caps_td_ramping ||
            pi->caps_tcp_ramping) {
-               adev->gfx.rlc.funcs->enter_safe_mode(adev);
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
 
                if (enable) {
                        ret = kv_program_pt_config_registers(adev, didt_config_kv);
                        if (ret) {
-                               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+                               amdgpu_gfx_rlc_exit_safe_mode(adev);
                                return ret;
                        }
                }
 
                kv_do_enable_didt(adev, enable);
 
-               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
        }
 
        return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 5e19f59..d138ddae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -967,7 +967,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
            PP_CAP(PHM_PlatformCaps_TDRamping) ||
            PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-               adev->gfx.rlc.funcs->enter_safe_mode(adev);
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
                mutex_lock(&adev->grbm_idx_mutex);
                value = 0;
                value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
@@ -1014,13 +1014,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
                                        "Failed to enable DPM DIDT.", goto 
error);
                }
                mutex_unlock(&adev->grbm_idx_mutex);
-               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
        }
 
        return 0;
 error:
        mutex_unlock(&adev->grbm_idx_mutex);
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
        return result;
 }
 
@@ -1034,7 +1034,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
            PP_CAP(PHM_PlatformCaps_TDRamping) ||
            PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-               adev->gfx.rlc.funcs->enter_safe_mode(adev);
+               amdgpu_gfx_rlc_enter_safe_mode(adev);
 
                result = smu7_enable_didt(hwmgr, false);
                PP_ASSERT_WITH_CODE((result == 0),
@@ -1046,12 +1046,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
                        PP_ASSERT_WITH_CODE((0 == result),
                                        "Failed to disable DPM DIDT.", goto 
error);
                }
-               adev->gfx.rlc.funcs->exit_safe_mode(adev);
+               amdgpu_gfx_rlc_exit_safe_mode(adev);
        }
 
        return 0;
 error:
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
        return result;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 2d88abf..6f26cb2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -937,7 +937,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 
        num_se = adev->gfx.config.max_shader_engines;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (count = 0; count < num_se; count++) {
@@ -962,7 +962,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 
        vega10_didt_set_mask(hwmgr, true);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        return 0;
 }
@@ -971,11 +971,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        vega10_didt_set_mask(hwmgr, false);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        return 0;
 }
@@ -988,7 +988,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 
        num_se = adev->gfx.config.max_shader_engines;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (count = 0; count < num_se; count++) {
@@ -1007,7 +1007,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 
        vega10_didt_set_mask(hwmgr, true);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
        if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1024,11 +1024,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = hwmgr->adev;
        uint32_t data;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        vega10_didt_set_mask(hwmgr, false);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
                data = 0x00000000;
@@ -1049,7 +1049,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 
        num_se = adev->gfx.config.max_shader_engines;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (count = 0; count < num_se; count++) {
@@ -1070,7 +1070,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 
        vega10_didt_set_mask(hwmgr, true);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        return 0;
 }
@@ -1079,11 +1079,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        vega10_didt_set_mask(hwmgr, false);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        return 0;
 }
@@ -1097,7 +1097,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 
        num_se = adev->gfx.config.max_shader_engines;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
 
@@ -1118,7 +1118,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 
        vega10_didt_set_mask(hwmgr, true);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
 
@@ -1138,11 +1138,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = hwmgr->adev;
        uint32_t data;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        vega10_didt_set_mask(hwmgr, false);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
                data = 0x00000000;
@@ -1160,7 +1160,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = hwmgr->adev;
        int result;
 
-       adev->gfx.rlc.funcs->enter_safe_mode(adev);
+       amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        mutex_lock(&adev->grbm_idx_mutex);
        WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
@@ -1173,7 +1173,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
 
        vega10_didt_set_mask(hwmgr, false);
 
-       adev->gfx.rlc.funcs->exit_safe_mode(adev);
+       amdgpu_gfx_rlc_exit_safe_mode(adev);
 
        return 0;
 }
-- 
2.7.4
