Reviewed-by: Emily Deng <emily.d...@amd.com>

Best wishes
Emily Deng
>-----Original Message-----
>From: amd-gfx <amd-gfx-boun...@lists.freedesktop.org> On Behalf Of Yintian
>Tao
>Sent: Monday, March 30, 2020 4:50 PM
>To: Koenig, Christian <christian.koe...@amd.com>; Deucher, Alexander
><alexander.deuc...@amd.com>
>Cc: amd-gfx@lists.freedesktop.org; Tao, Yintian <yintian....@amd.com>
>Subject: [PATCH] drm/amdgpu: skip access sdma_v5_0 registers under SRIOV
>
>Due to the new L1.0b0c011b policy, many SDMA registers are blocked, which
>raises violation warnings. There are six register pairs in total that need
>to be skipped at driver init and de-init:
>mmSDMA0/1_CNTL
>mmSDMA0/1_F32_CNTL
>mmSDMA0/1_UTCL1_PAGE
>mmSDMA0/1_UTCL1_CNTL
>mmSDMA0/1_CHICKEN_BITS
>mmSDMA0/1_SEM_WAIT_FAIL_TIMER_CNTL
>
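
One note for readers following the patch: the fix applies the same guard
everywhere, wrapping each host-privileged register access in an
amdgpu_sriov_vf() check so that bare-metal behavior is unchanged. A minimal
sketch of the pattern, using one of the pairs listed above as the example:

        if (!amdgpu_sriov_vf(adev)) {
                u32 tmp;

                /* blocked for VFs by the L1 policy; only the host programs it */
                tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
                tmp = REG_SET_FIELD(tmp, SDMA0_CNTL, AUTO_CTXSW_ENABLE,
                                    enable ? 1 : 0);
                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), tmp);
        }

The WPTR_POLL_CNTL registers remain VF-accessible, which is presumably why
the patch moves them into the dedicated golden_settings_sdma_5_sriov table
below rather than skipping them.
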
>Signed-off-by: Yintian Tao <yt...@amd.com>
>Change-Id: I9d5087582ceb5f629d37bf856533d00c179e6de3
>---
> drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 110 +++++++++++++++++--------
> 1 file changed, 75 insertions(+), 35 deletions(-)
>
>diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
>index b3c30616d6b4..d7c0269059b0 100644
>--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
>+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
>@@ -88,6 +88,29 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
>         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
> };
>
>+static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
>+};
>+
> static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
>         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
>         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
>@@ -141,9 +164,14 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
>                                                 (const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
>                 break;
>         case CHIP_NAVI12:
>-                soc15_program_register_sequence(adev,
>-                                                golden_settings_sdma_5,
>-                                                (const u32)ARRAY_SIZE(golden_settings_sdma_5));
>+                if (amdgpu_sriov_vf(adev))
>+                        soc15_program_register_sequence(adev,
>+                                                        golden_settings_sdma_5_sriov,
>+                                                        (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
>+                else
>+                        soc15_program_register_sequence(adev,
>+                                                        golden_settings_sdma_5,
>+                                                        (const u32)ARRAY_SIZE(golden_settings_sdma_5));
>                 soc15_program_register_sequence(adev,
>                                                 golden_settings_sdma_nv12,
>                                                 (const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
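
For anyone not familiar with the golden-register tables: each
SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) entry is applied by
soc15_program_register_sequence() as a masked read-modify-write, roughly like
the sketch below (simplified; the special cases in soc15.c are omitted):

        /* per-entry application, simplified */
        tmp = RREG32(reg);
        tmp &= ~entry->and_mask;                   /* clear the programmed bits */
        tmp |= (entry->or_mask & entry->and_mask); /* install the golden value */
        WREG32(reg, tmp);

So with and_mask 0xfffffff7, the new SRIOV table reprograms every bit of each
WPTR_POLL_CNTL register except bit 3.
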
>@@ -557,9 +585,12 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
>         }
>
>         for (i = 0; i < adev->sdma.num_instances; i++) {
>-                f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
>-                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
>-                                AUTO_CTXSW_ENABLE, enable ? 1 : 0);
>+                if (!amdgpu_sriov_vf(adev)) {
>+                        f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
>+                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
>+                                                 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
>+                }
>+
>                 if (enable && amdgpu_sdma_phase_quantum) {
>                         WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
>                                phase_quantum);
>@@ -568,7 +599,8 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
>                         WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
>                                phase_quantum);
>                 }
>-                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
>+                if (!amdgpu_sriov_vf(adev))
>+                        WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
>         }
>
> }
>@@ -591,6 +623,9 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
>                 sdma_v5_0_rlc_stop(adev);
>         }
>
>+        if (amdgpu_sriov_vf(adev))
>+                return;
>+
>         for (i = 0; i < adev->sdma.num_instances; i++) {
>                 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
>                 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
>@@ -623,7 +658,8 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
>                 ring = &adev->sdma.instance[i].ring;
>                 wb_offset = (ring->rptr_offs * 4);
>
>-                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
>+                if (!amdgpu_sriov_vf(adev))
>+                        WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
>
>                 /* Set ring buffer size in dwords */
>                 rb_bufsz = order_base_2(ring->ring_size / 4);
>@@ -699,26 +735,28 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
>                 /* set minor_ptr_update to 0 after wptr programed */
>                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
>
>-                /* set utc l1 enable flag always to 1 */
>-                temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
>-                temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
>-
>-                /* enable MCBP */
>-                temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
>-                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
>-
>-                /* Set up RESP_MODE to non-copy addresses */
>-                temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
>-                temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
>-                temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
>-                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
>-
>-                /* program default cache read and write policy */
>-                temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
>-                /* clean read policy and write policy bits */
>-                temp &= 0xFF0FFF;
>-                temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
>-                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
>+                if (!amdgpu_sriov_vf(adev)) {
>+                        /* set utc l1 enable flag always to 1 */
>+                        temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
>+                        temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
>+
>+                        /* enable MCBP */
>+                        temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
>+                        WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
>+
>+                        /* Set up RESP_MODE to non-copy addresses */
>+                        temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
>+                        temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
>+                        temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
>+                        WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
>+
>+                        /* program default cache read and write policy */
>+                        temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
>+                        /* clean read policy and write policy bits */
>+                        temp &= 0xFF0FFF;
>+                        temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
>+                        WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
>+                }
>
>               if (!amdgpu_sriov_vf(adev)) {
>                       /* unhalt engine */
>@@ -1388,14 +1426,16 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
> {
>         u32 sdma_cntl;
>
>-        u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
>-                sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
>-                sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
>+        if (!amdgpu_sriov_vf(adev)) {
>+                u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
>+                        sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
>+                        sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
>
>-        sdma_cntl = RREG32(reg_offset);
>-        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
>-                       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
>-        WREG32(reg_offset, sdma_cntl);
>+                sdma_cntl = RREG32(reg_offset);
>+                sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
>+                                          state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
>+                WREG32(reg_offset, sdma_cntl);
>+        }
>
>       return 0;
> }
>--
>2.17.1
>