[PATCH] drm/amd/powerplay: determine pm_en at amd_powerplay_create

2020-04-01 Thread Tiecheng Zhou
pm_en needs to be determined in amd_powerplay_create(), during the early_init stage, rather than later in hwmgr_hw_init().
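
In effect (a condensed restatement of the two hunks below, not new logic), the
flag is now computed once at create time and hwmgr_hw_init() only tests it:

    /* amd_powerplay_create(), i.e. early_init: decided once, up front */
    hwmgr->not_vf = !amdgpu_sriov_vf(adev);
    hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf(adev);
    hwmgr->pm_en = amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf);

    /* hwmgr_hw_init() then only has to test the already-determined flag */
    if (!hwmgr->pm_en)
        return 0;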

Signed-off-by: Tiecheng Zhou 
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 3 +++
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c   | 3 ---
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c 
b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 71b843f542d8..a37dc37dfe49 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -48,6 +48,9 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
 
hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
+   hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf(adev);
+   hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf))
+   ? true : false;
hwmgr->device = amdgpu_cgs_create_device(adev);
mutex_init(&hwmgr->smu_lock);
mutex_init(&hwmgr->msg_lock);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index f48fdc7f0382..7aee382fc1f9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -221,9 +221,6 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 {
int ret = 0;
 
-   hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev);
-   hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf))
-   ? true : false;
if (!hwmgr->pm_en)
return 0;
 
-- 
2.17.1



Re: [PATCH] amdgpu/drm: remove psp access on navi10 for sriov

2020-04-01 Thread Felix Kuehling
Am 2020-04-01 um 6:00 p.m. schrieb Alex Sierra:
> Navi ASICs don't require access to osssys registers through the PSP.
> This applies to the SR-IOV configuration.
>
> Signed-off-by: Alex Sierra 

Reviewed-by: Felix Kuehling 


> ---
>  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 18 +-
>  1 file changed, 9 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c 
> b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
> index 6fca5206833d..f97857ed3c7e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
> +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
> @@ -49,7 +49,7 @@ static void navi10_ih_enable_interrupts(struct 
> amdgpu_device *adev)
>  
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
> - if (amdgpu_sriov_vf(adev)) {
> + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
>   if (psp_reg_program(>psp, PSP_REG_IH_RB_CNTL, 
> ih_rb_cntl)) {
>   DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
>   return;
> @@ -64,7 +64,7 @@ static void navi10_ih_enable_interrupts(struct 
> amdgpu_device *adev)
>   ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
>  RB_ENABLE, 1);
> - if (amdgpu_sriov_vf(adev)) {
> + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
>   if (psp_reg_program(>psp, 
> PSP_REG_IH_RB_CNTL_RING1,
>   ih_rb_cntl)) {
>   DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
> @@ -80,7 +80,7 @@ static void navi10_ih_enable_interrupts(struct 
> amdgpu_device *adev)
>   ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
>  RB_ENABLE, 1);
> - if (amdgpu_sriov_vf(adev)) {
> + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
>   if (psp_reg_program(>psp, 
> PSP_REG_IH_RB_CNTL_RING2,
>   ih_rb_cntl)) {
>   DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
> @@ -106,7 +106,7 @@ static void navi10_ih_disable_interrupts(struct 
> amdgpu_device *adev)
>  
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
> - if (amdgpu_sriov_vf(adev)) {
> + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
>   if (psp_reg_program(>psp, PSP_REG_IH_RB_CNTL, 
> ih_rb_cntl)) {
>   DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
>   return;
> @@ -125,7 +125,7 @@ static void navi10_ih_disable_interrupts(struct 
> amdgpu_device *adev)
>   ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
>  RB_ENABLE, 0);
> - if (amdgpu_sriov_vf(adev)) {
> + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
>   if (psp_reg_program(>psp, 
> PSP_REG_IH_RB_CNTL_RING1,
>   ih_rb_cntl)) {
>   DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
> @@ -145,7 +145,7 @@ static void navi10_ih_disable_interrupts(struct 
> amdgpu_device *adev)
>   ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
>  RB_ENABLE, 0);
> - if (amdgpu_sriov_vf(adev)) {
> + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
>   if (psp_reg_program(>psp, 
> PSP_REG_IH_RB_CNTL_RING2,
>   ih_rb_cntl)) {
>   DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
> @@ -253,7 +253,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
>   ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
>  !!adev->irq.msi_enabled);
> - if (amdgpu_sriov_vf(adev)) {
> + if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
>   if (psp_reg_program(>psp, PSP_REG_IH_RB_CNTL, 
> ih_rb_cntl)) {
>   DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
>   return -ETIMEDOUT;
> @@ -300,7 +300,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
>  WPTR_OVERFLOW_ENABLE, 0);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, 

[PATCH] amdgpu/drm: remove psp access on navi10 for sriov

2020-04-01 Thread Alex Sierra
Navi ASICs don't require access to osssys registers through the PSP.
This applies to the SR-IOV configuration.

Signed-off-by: Alex Sierra 
---
 drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c 
b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 6fca5206833d..f97857ed3c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -49,7 +49,7 @@ static void navi10_ih_enable_interrupts(struct amdgpu_device 
*adev)
 
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
-   if (amdgpu_sriov_vf(adev)) {
+   if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return;
@@ -64,7 +64,7 @@ static void navi10_ih_enable_interrupts(struct amdgpu_device 
*adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
   RB_ENABLE, 1);
-   if (amdgpu_sriov_vf(adev)) {
+   if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
    ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
@@ -80,7 +80,7 @@ static void navi10_ih_enable_interrupts(struct amdgpu_device 
*adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
   RB_ENABLE, 1);
-   if (amdgpu_sriov_vf(adev)) {
+   if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
    ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
@@ -106,7 +106,7 @@ static void navi10_ih_disable_interrupts(struct 
amdgpu_device *adev)
 
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
-   if (amdgpu_sriov_vf(adev)) {
+   if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return;
@@ -125,7 +125,7 @@ static void navi10_ih_disable_interrupts(struct 
amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
   RB_ENABLE, 0);
-   if (amdgpu_sriov_vf(adev)) {
+   if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
    ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
@@ -145,7 +145,7 @@ static void navi10_ih_disable_interrupts(struct 
amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
   RB_ENABLE, 0);
-   if (amdgpu_sriov_vf(adev)) {
+   if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
    ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
@@ -253,7 +253,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
   !!adev->irq.msi_enabled);
-   if (amdgpu_sriov_vf(adev)) {
+   if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return -ETIMEDOUT;
@@ -300,7 +300,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
   WPTR_OVERFLOW_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
   RB_FULL_DRAIN_ENABLE, 1);
-   if (amdgpu_sriov_vf(adev)) {
+   if 

Re: [PATCH] Revert "drm/amdgpu: call psp to program ih cntl in SR-IOV for Navi"

2020-04-01 Thread Felix Kuehling
Am 2020-04-01 um 5:00 p.m. schrieb Alex Sierra:
> This reverts commit 9e517bb84c282c1f06edded20f16f927426d2e40.
> Navi ASICs don't require access to osssys registers through the PSP.
> This applies to the SR-IOV configuration.

If you submit this change, please don't forget to add a Signed-off-by
line and feel free to add my Reviewed-by.

But I think we don't need to revert it. Just change the condition for
using PSP. See inline ...


> ---
>  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 89 +++---
>  1 file changed, 9 insertions(+), 80 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c 
> b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
> index 6fca5206833d..4968259a22c4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
> +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
> @@ -49,30 +49,14 @@ static void navi10_ih_enable_interrupts(struct 
> amdgpu_device *adev)
>  
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
> - if (amdgpu_sriov_vf(adev)) {

Just change this condition to if (amdgpu_sriov_vf(adev) &&
adev->asic_type < CHIP_NAVI10) everywhere.
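
For illustration, the resulting shape of each guarded write would be roughly
the following (a sketch only, using the IH_RB_CNTL case from the diff; not a
tested change):

	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
		/* pre-Navi SR-IOV VFs must program IH_RB_CNTL through PSP */
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return;
		}
	} else {
		/* Navi10 and later (and bare metal) can write the register directly */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}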

Regards,
  Felix


> - if (psp_reg_program(>psp, PSP_REG_IH_RB_CNTL, 
> ih_rb_cntl)) {
> - DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
> - return;
> - }
> - } else {
> - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
> - }
> -
> + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
>   adev->irq.ih.enabled = true;
>  
>   if (adev->irq.ih1.ring_size) {
>   ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
>  RB_ENABLE, 1);
> - if (amdgpu_sriov_vf(adev)) {
> - if (psp_reg_program(>psp, 
> PSP_REG_IH_RB_CNTL_RING1,
> - ih_rb_cntl)) {
> - DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
> - return;
> - }
> - } else {
> - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
> - }
> + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
>   adev->irq.ih1.enabled = true;
>   }
>  
> @@ -80,15 +64,7 @@ static void navi10_ih_enable_interrupts(struct 
> amdgpu_device *adev)
>   ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
>  RB_ENABLE, 1);
> - if (amdgpu_sriov_vf(adev)) {
> - if (psp_reg_program(>psp, 
> PSP_REG_IH_RB_CNTL_RING2,
> - ih_rb_cntl)) {
> - DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
> - return;
> - }
> - } else {
> - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
> - }
> + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
>   adev->irq.ih2.enabled = true;
>   }
>  }
> @@ -106,15 +82,7 @@ static void navi10_ih_disable_interrupts(struct 
> amdgpu_device *adev)
>  
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
> - if (amdgpu_sriov_vf(adev)) {
> - if (psp_reg_program(>psp, PSP_REG_IH_RB_CNTL, 
> ih_rb_cntl)) {
> - DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
> - return;
> - }
> - } else {
> - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
> - }
> -
> + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
>   /* set rptr, wptr to 0 */
>   WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
>   WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
> @@ -125,15 +93,7 @@ static void navi10_ih_disable_interrupts(struct 
> amdgpu_device *adev)
>   ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
>   ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
>  RB_ENABLE, 0);
> - if (amdgpu_sriov_vf(adev)) {
> - if (psp_reg_program(>psp, 
> PSP_REG_IH_RB_CNTL_RING1,
> - ih_rb_cntl)) {
> - DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
> - return;
> - }
> - } else {
> - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
> - }
> + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
>   /* set rptr, wptr to 0 */
>   

[PATCH] Revert "drm/amdgpu: call psp to program ih cntl in SR-IOV for Navi"

2020-04-01 Thread Alex Sierra
This reverts commit 9e517bb84c282c1f06edded20f16f927426d2e40.
Navi ASICs don't require access to osssys registers through the PSP.
This applies to the SR-IOV configuration.
---
 drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 89 +++---
 1 file changed, 9 insertions(+), 80 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c 
b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 6fca5206833d..4968259a22c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -49,30 +49,14 @@ static void navi10_ih_enable_interrupts(struct 
amdgpu_device *adev)
 
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
-   if (amdgpu_sriov_vf(adev)) {
-   if (psp_reg_program(>psp, PSP_REG_IH_RB_CNTL, 
ih_rb_cntl)) {
-   DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
-   return;
-   }
-   } else {
-   WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-   }
-
+   WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
adev->irq.ih.enabled = true;
 
if (adev->irq.ih1.ring_size) {
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
   RB_ENABLE, 1);
-   if (amdgpu_sriov_vf(adev)) {
-   if (psp_reg_program(>psp, 
PSP_REG_IH_RB_CNTL_RING1,
-   ih_rb_cntl)) {
-   DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
-   return;
-   }
-   } else {
-   WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
-   }
+   WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
adev->irq.ih1.enabled = true;
}
 
@@ -80,15 +64,7 @@ static void navi10_ih_enable_interrupts(struct amdgpu_device 
*adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
   RB_ENABLE, 1);
-   if (amdgpu_sriov_vf(adev)) {
-   if (psp_reg_program(>psp, 
PSP_REG_IH_RB_CNTL_RING2,
-   ih_rb_cntl)) {
-   DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
-   return;
-   }
-   } else {
-   WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
-   }
+   WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
adev->irq.ih2.enabled = true;
}
 }
@@ -106,15 +82,7 @@ static void navi10_ih_disable_interrupts(struct 
amdgpu_device *adev)
 
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
-   if (amdgpu_sriov_vf(adev)) {
-   if (psp_reg_program(>psp, PSP_REG_IH_RB_CNTL, 
ih_rb_cntl)) {
-   DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
-   return;
-   }
-   } else {
-   WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-   }
-
+   WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
@@ -125,15 +93,7 @@ static void navi10_ih_disable_interrupts(struct 
amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
   RB_ENABLE, 0);
-   if (amdgpu_sriov_vf(adev)) {
-   if (psp_reg_program(>psp, 
PSP_REG_IH_RB_CNTL_RING1,
-   ih_rb_cntl)) {
-   DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
-   return;
-   }
-   } else {
-   WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
-   }
+   WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
@@ -145,15 +105,7 @@ static void navi10_ih_disable_interrupts(struct 
amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
   RB_ENABLE, 0);
-   if (amdgpu_sriov_vf(adev)) {
-  

[PATCH 1/2] drm/amd/display: query hdcp capability during link detect

2020-04-01 Thread Bhawanpreet Lakha
[Why]
Query the HDCP caps of a link; this is useful and can be reported to the user

[How]
Create a query function and call it during link detect

Signed-off-by: Bhawanpreet Lakha 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 56 
 drivers/gpu/drm/amd/display/dc/dc.h   | 41 +
 drivers/gpu/drm/amd/display/dc/dc_link.h  |  3 +
 .../gpu/drm/amd/display/dc/hdcp/hdcp_msg.c| 89 +++
 .../gpu/drm/amd/display/include/hdcp_types.h  |  7 ++
 5 files changed, 196 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a93997ff0419..49c63e27dfe9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -514,6 +514,50 @@ static void link_disconnect_remap(struct dc_sink 
*prev_sink, struct dc_link *lin
link->local_sink = prev_sink;
 }
 
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+static void query_hdcp_capability(enum signal_type signal, struct dc_link 
*link)
+{
+   struct hdcp_protection_message msg22;
+   struct hdcp_protection_message msg14;
+
+   memset(&msg22, 0, sizeof(struct hdcp_protection_message));
+   memset(&msg14, 0, sizeof(struct hdcp_protection_message));
+   memset(link->hdcp_caps.rx_caps.raw, 0,
+   sizeof(link->hdcp_caps.rx_caps.raw));
+
+   if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+   link->ddc->transaction_type ==
+   DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
+   link->connector_signal == SIGNAL_TYPE_EDP) {
+   msg22.data = link->hdcp_caps.rx_caps.raw;
+   msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
+   msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
+   } else {
+   msg22.data = &link->hdcp_caps.rx_caps.fields.version;
+   msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
+   msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
+   }
+   msg22.version = HDCP_VERSION_22;
+   msg22.link = HDCP_LINK_PRIMARY;
+   msg22.max_retries = 5;
+   dc_process_hdcp_msg(signal, link, &msg22);
+
+   if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == 
SIGNAL_TYPE_DISPLAY_PORT_MST) {
+   enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
+
+   msg14.data = &link->hdcp_caps.bcaps.raw;
+   msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
+   msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
+   msg14.version = HDCP_VERSION_14;
+   msg14.link = HDCP_LINK_PRIMARY;
+   msg14.max_retries = 5;
+
+   status = dc_process_hdcp_msg(signal, link, &msg14);
+   }
+
+}
+#endif
+
 static void read_current_link_settings_on_detect(struct dc_link *link)
 {
union lane_count_set lane_count_set = { {0} };
@@ -606,6 +650,12 @@ static bool detect_dp(struct dc_link *link,
dal_ddc_service_set_transaction_type(link->ddc,
 
sink_caps->transaction_type);
 
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+   /* In case of fallback to SST when topology discovery below fails,
+    * HDCP caps will be queried again later by the upper layer (caller
+    * of this function). */
+   query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link);
+#endif
/*
 * This call will initiate MST topology discovery. Which
 * will detect MST ports and add new DRM connector DRM
@@ -975,6 +1025,9 @@ static bool dc_link_detect_helper(struct dc_link *link,
 * TODO debug why Dell 2413 doesn't like
 *  two link trainings
 */
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+   query_hdcp_capability(sink->sink_signal, link);
+#endif
 
// verify link cap for SST non-seamless boot
if (!perform_dp_seamless_boot)
@@ -988,6 +1041,9 @@ static bool dc_link_detect_helper(struct dc_link *link,
sink = prev_sink;
prev_sink = NULL;
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+   query_hdcp_capability(sink->sink_signal, link);
+#endif
}
 
/* HDMI-DVI Dongle */
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 92123b0d1196..9235d04c32dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -29,6 +29,9 @@
 #include "dc_types.h"
 #include "grph_object_defs.h"
 #include "logger_types.h"
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+#include "hdcp_types.h"
+#endif
 #include "gpio_types.h"
 #include "link_service_types.h"
 #include "grph_object_ctrl_defs.h"
@@ 

[PATCH 2/2] drm/amd/display: add HDCP caps debugfs

2020-04-01 Thread Bhawanpreet Lakha
Add a debugfs entry to get the HDCP capability. This is also useful for
the kms_content_protection IGT test.

Use:
cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability
cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability
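
Example output (illustrative only; the connector name, object id and the
reported versions depend on the attached sink):
    DP-1:78 HDCP version: HDCP1.4 HDCP2.2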

Signed-off-by: Bhawanpreet Lakha 
---
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 61 +++
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 47 ++
 drivers/gpu/drm/amd/display/dc/dc_link.h  |  4 ++
 3 files changed, 112 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0461fecd68db..4b695f6a80c6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -838,6 +838,44 @@ static int vrr_range_show(struct seq_file *m, void *data)
return 0;
 }
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+/*
+ * Returns the HDCP capability of the Display (1.4 for now).
+ *
+ * NOTE: Not all HDMI displays report their HDCP caps even when they are
+ * capable. Since it's rare for a display to not be HDCP 1.4 capable, we set
+ * HDMI as always capable.
+ *
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability
+ * or cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability
+ */
+static int hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+   struct drm_connector *connector = m->private;
+   struct amdgpu_dm_connector *aconnector = 
to_amdgpu_dm_connector(connector);
+   bool hdcp_cap, hdcp2_cap;
+
+   if (connector->status != connector_status_connected)
+   return -ENODEV;
+
+   seq_printf(m, "%s:%d HDCP version: ", connector->name, 
connector->base.id);
+
+   hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link);
+   hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link);
+
+
+   if (hdcp_cap)
+   seq_printf(m, "%s ", "HDCP1.4");
+   if (hdcp2_cap)
+   seq_printf(m, "%s ", "HDCP2.2");
+
+   if (!hdcp_cap && !hdcp2_cap)
+   seq_printf(m, "%s ", "None");
+
+   seq_puts(m, "\n");
+
+   return 0;
+}
+#endif
 /* function description
  *
  * generic SDP message access for testing
@@ -964,6 +1002,9 @@ DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
 DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
 DEFINE_SHOW_ATTRIBUTE(output_bpc);
 DEFINE_SHOW_ATTRIBUTE(vrr_range);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
+#endif
 
 static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
@@ -1019,12 +1060,23 @@ static const struct {
{"test_pattern", _phy_test_pattern_fops},
{"output_bpc", _bpc_fops},
{"vrr_range", _range_fops},
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+   {"hdcp_sink_capability", _sink_capability_fops},
+#endif
{"sdp_message", _message_fops},
{"aux_dpcd_address", _dpcd_address_debugfs_fops},
{"aux_dpcd_size", _dpcd_size_debugfs_fops},
{"aux_dpcd_data", _dpcd_data_debugfs_fops}
 };
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static const struct {
+   char *name;
+   const struct file_operations *fops;
+} hdmi_debugfs_entries[] = {
+   {"hdcp_sink_capability", _sink_capability_fops}
+};
+#endif
 /*
  * Force YUV420 output if available from the given mode
  */
@@ -1093,6 +1145,15 @@ void connector_debugfs_init(struct amdgpu_dm_connector 
*connector)
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+   if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+   for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
+   debugfs_create_file(hdmi_debugfs_entries[i].name,
+   0644, dir, connector,
+   hdmi_debugfs_entries[i].fops);
+   }
+   }
+#endif
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 49c63e27dfe9..e8b5d7a22ce7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -515,6 +515,53 @@ static void link_disconnect_remap(struct dc_sink 
*prev_sink, struct dc_link *lin
 }
 
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
+bool dc_link_is_hdcp14(struct dc_link *link)
+{
+   bool ret = false;
+
+   switch (link->connector_signal) {
+   case SIGNAL_TYPE_DISPLAY_PORT:
+   case SIGNAL_TYPE_DISPLAY_PORT_MST:
+   ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
+   break;
+   case SIGNAL_TYPE_DVI_SINGLE_LINK:
+   case SIGNAL_TYPE_DVI_DUAL_LINK:
+   case SIGNAL_TYPE_HDMI_TYPE_A:
+   /* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always 
be capable,
+* we can poll for bksv but some displays 

Re: [PATCH] drm/amdkfd: kfree the wrong pointer

2020-04-01 Thread Felix Kuehling
Am 2020-04-01 um 8:12 a.m. schrieb Jack Zhang:
> Originally, it kfrees the wrong pointer for mem_obj.
> This would cause a memory leak under stress testing.
>
> Signed-off-by: Jack Zhang 

Reviewed-by: Felix Kuehling 

Thanks for catching this. Did you actually run into this code path? In
that case we may also need to increase the GTT memory reserved for this
suballocator.

Thanks,
  Felix

> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
> index 7866cd06..0491ab2 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
> @@ -1134,9 +1134,9 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned 
> int size,
>   return 0;
>  
>  kfd_gtt_no_free_chunk:
> - pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
> + pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
>   mutex_unlock(&kfd->gtt_sa_lock);
> - kfree(mem_obj);
> + kfree(*mem_obj);
>   return -ENOMEM;
>  }
>  
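
For context, kfd_gtt_sa_allocate() takes mem_obj as an out-parameter and
allocates into *mem_obj, so the error path has to free *mem_obj; passing
mem_obj itself to kfree() hands it the caller's pointer-to-pointer and leaks
the real allocation. A condensed sketch of the pattern (simplified, not the
exact driver code):

	int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
				struct kfd_mem_obj **mem_obj)
	{
		*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
		if (!*mem_obj)
			return -ENOMEM;
		/* ... search the GTT sub-allocator for a free chunk ... */

	kfd_gtt_no_free_chunk:
		mutex_unlock(&kfd->gtt_sa_lock);
		kfree(*mem_obj);	/* free the allocation, not the out-parameter */
		return -ENOMEM;
	}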


Re: [PATCH] drm/amdkfd: Provide SMI events watch

2020-04-01 Thread Felix Kuehling
Am 2020-04-01 um 9:10 a.m. schrieb Amber Lin:
>
> Thanks Felix for the review. I have a better understanding of how
> kfifo works now and have changed my code quite a bit. Couple of
> questions below inline regarding the gpu_id and data arguments.
>
Replies inline ...


> Thanks.
>
> Amber
>
> On 2020-03-26 4:53 p.m., Felix Kuehling wrote:
>>
>> Hi Amber,
>>
>> I see that this is based on the debugger event code. Jon and I are
>> just working through some issues with that code. The lessons from
>> that will need to be applied to this as well. But I think we can
>> define your API to simplify this a bit.
>>
>> The basic problem is that we have one FIFO in the kfd_device, but
>> potentially multiple file descriptors referring to it. For the event
>> interface I think we can enforce only a single file descriptor per
>> device. If there is already one, your register call can fail. See
>> more comments inline.
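
To illustrate the idea (hypothetical field and label names, not the posted
code), the registration path could simply refuse a second open per device:

	/* in the SMI-events registration ioctl, under the device lock */
	if (dev->smi_event_fd_registered)
		return -EBUSY;		/* only one event fd per kfd_dev */
	dev->smi_event_fd_registered = true;
	/* ... then create the anonymous fd backed by the per-device fifo ... */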
>>
>> On 2020-03-17 13:57, Amber Lin wrote:
>>> When the compute is malfunctioning or performance drops, the system admin
>>> will use an SMI (System Management Interface) tool to monitor/diagnose what
>>> went wrong. This patch provides an event watch interface for the user
>>> space to register the events they are interested in. After the event is
>>> registered, the user can use the anonymous file descriptor's pull function
>>
>> pull -> poll
>>
> Thank you for spotting the typo. I’ll change that.
>
>>> with wait-time specified to wait for the event to happen. Once the event
>>> happens, the user can use read() to retrieve information related to the
>>> event.
>>>
>>> VM fault event is done in this patch.
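
Schematically, the user-space side of that flow looks like this (illustrative
only; the actual ioctl name and event payload format are defined by the
patch's UAPI, not by this sketch):

	/* fd: the anonymous file descriptor returned by the SMI-events ioctl */
	static int wait_for_smi_event(int fd, int timeout_ms)
	{
		struct pollfd p = { .fd = fd, .events = POLLIN };
		char buf[256];

		if (poll(&p, 1, timeout_ms) <= 0 || !(p.revents & POLLIN))
			return -1;
		return read(fd, buf, sizeof(buf));	/* event data, e.g. VM fault info */
	}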
>>>
>>> Signed-off-by: Amber Lin 
>>> ---
>>>  drivers/gpu/drm/amd/amdkfd/Makefile  |   3 +-
>>>  drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c |   2 +
>>>  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c |  38 ++
>>>  drivers/gpu/drm/amd/amdkfd/kfd_device.c  |   1 +
>>>  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c  |   2 +
>>>  drivers/gpu/drm/amd/amdkfd/kfd_priv.h|  10 ++
>>>  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c  | 143 
>>> +++
>>>  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h  |  41 +++
>>>  include/uapi/linux/kfd_ioctl.h   |  27 -
>>>  9 files changed, 265 insertions(+), 2 deletions(-)
>>>  create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
>>>  create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile 
>>> b/drivers/gpu/drm/amd/amdkfd/Makefile
>>> index 6147462..cc98b4a 100644
>>> --- a/drivers/gpu/drm/amd/amdkfd/Makefile
>>> +++ b/drivers/gpu/drm/amd/amdkfd/Makefile
>>> @@ -53,7 +53,8 @@ AMDKFD_FILES  := $(AMDKFD_PATH)/kfd_module.o \
>>> $(AMDKFD_PATH)/kfd_int_process_v9.o \
>>> $(AMDKFD_PATH)/kfd_dbgdev.o \
>>> $(AMDKFD_PATH)/kfd_dbgmgr.o \
>>> -   $(AMDKFD_PATH)/kfd_crat.o
>>> +   $(AMDKFD_PATH)/kfd_crat.o \
>>> +   $(AMDKFD_PATH)/kfd_smi_events.o
>>>  
>>>  ifneq ($(CONFIG_AMD_IOMMU_V2),)
>>>  AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o
>>> diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c 
>>> b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
>>> index 9f59ba9..24b4717 100644
>>> --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
>>> +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
>>> @@ -24,6 +24,7 @@
>>>  #include "kfd_events.h"
>>>  #include "cik_int.h"
>>>  #include "amdgpu_amdkfd.h"
>>> +#include "kfd_smi_events.h"
>>>  
>>>  static bool cik_event_interrupt_isr(struct kfd_dev *dev,
>>> const uint32_t *ih_ring_entry,
>>> @@ -107,6 +108,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
>>> ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
>>> struct kfd_vm_fault_info info;
>>>  
>>> +   kfd_smi_event_update_vmfault(dev, pasid);
>>> kfd_process_vm_fault(dev->dqm, pasid);
>>>  
>>> memset(, 0, sizeof(info));
>>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 
>>> b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
>>> index f8fa03a..8e92956 100644
>>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
>>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
>>> @@ -39,6 +39,7 @@
>>>  #include "kfd_device_queue_manager.h"
>>>  #include "kfd_dbgmgr.h"
>>>  #include "amdgpu_amdkfd.h"
>>> +#include "kfd_smi_events.h"
>>>  
>>>  static long kfd_ioctl(struct file *, unsigned int, unsigned long);
>>>  static int kfd_open(struct inode *, struct file *);
>>> @@ -1243,6 +1244,40 @@ static int kfd_ioctl_acquire_vm(struct file *filep, 
>>> struct kfd_process *p,
>>> return ret;
>>>  }
>>>  
>>> +/* Handle requests for watching SMI events */
>>> +static int kfd_ioctl_smi_events(struct file *filep,
>>> +   struct kfd_process *p, void *data)
>>> +{
>>> +   struct kfd_ioctl_smi_events_args *args = data;
>>> +   struct kfd_dev *dev;
>>> +  

Re: [PATCH] drm/amd/display: remove mod_hdcp_hdcp2_get_link_encryption_status()

2020-04-01 Thread Kazlauskas, Nicholas

On 2020-04-01 3:56 p.m., Bhawanpreet Lakha wrote:

It is not being used, so remove it

Signed-off-by: Bhawanpreet Lakha 


Thanks for the follow up!

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  .../gpu/drm/amd/display/modules/hdcp/hdcp.h   |  2 --
  .../drm/amd/display/modules/hdcp/hdcp_psp.c   | 29 ---
  2 files changed, 31 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h 
b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 5cb4546be0ef..8e8a26dd46fc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -357,8 +357,6 @@ enum mod_hdcp_status 
mod_hdcp_hdcp2_prepare_stream_management(
struct mod_hdcp *hdcp);
  enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(
struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp 
*hdcp,
-  enum 
mod_hdcp_encryption_status *encryption_status);
  
  /* ddc functions */

  enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c 
b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index aa147e171557..95a9c8bfbef6 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -917,32 +917,3 @@ enum mod_hdcp_status 
mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
return status;
  }
  
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,

-  enum 
mod_hdcp_encryption_status *encryption_status)
-{
-   struct psp_context *psp = hdcp->config.psp.handle;
-   struct ta_hdcp_shared_memory *hdcp_cmd;
-
-   hdcp_cmd = (struct ta_hdcp_shared_memory 
*)psp->hdcp_context.hdcp_shared_buf;
-
-   memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
-   hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = 
hdcp->auth.id;
-   hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0;
-   hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS;
-   *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
-
-   psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
-
-   if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-   return MOD_HDCP_STATUS_FAILURE;
-
-   if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 
1) {
-   if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == 
TA_HDCP2_CONTENT_TYPE__TYPE1)
-   *encryption_status = 
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
-   else
-   *encryption_status = 
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
-   }
-
-   return MOD_HDCP_STATUS_SUCCESS;
-}





[PATCH] drm/amd/display: remove mod_hdcp_hdcp2_get_link_encryption_status()

2020-04-01 Thread Bhawanpreet Lakha
It is not being used, so remove it

Signed-off-by: Bhawanpreet Lakha 
---
 .../gpu/drm/amd/display/modules/hdcp/hdcp.h   |  2 --
 .../drm/amd/display/modules/hdcp/hdcp_psp.c   | 29 ---
 2 files changed, 31 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h 
b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 5cb4546be0ef..8e8a26dd46fc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -357,8 +357,6 @@ enum mod_hdcp_status 
mod_hdcp_hdcp2_prepare_stream_management(
struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(
struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp 
*hdcp,
-  enum 
mod_hdcp_encryption_status *encryption_status);
 
 /* ddc functions */
 enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c 
b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index aa147e171557..95a9c8bfbef6 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -917,32 +917,3 @@ enum mod_hdcp_status 
mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
return status;
 }
 
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp 
*hdcp,
-  enum 
mod_hdcp_encryption_status *encryption_status)
-{
-   struct psp_context *psp = hdcp->config.psp.handle;
-   struct ta_hdcp_shared_memory *hdcp_cmd;
-
-   hdcp_cmd = (struct ta_hdcp_shared_memory 
*)psp->hdcp_context.hdcp_shared_buf;
-
-   memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
-   hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = 
hdcp->auth.id;
-   hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0;
-   hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS;
-   *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
-
-   psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
-
-   if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-   return MOD_HDCP_STATUS_FAILURE;
-
-   if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 
1) {
-   if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == 
TA_HDCP2_CONTENT_TYPE__TYPE1)
-   *encryption_status = 
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
-   else
-   *encryption_status = 
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
-   }
-
-   return MOD_HDCP_STATUS_SUCCESS;
-}
-- 
2.17.1



[pull] amdgpu, amdkfd drm-next-5.7

2020-04-01 Thread Alex Deucher
Hi Dave, Daniel,

Fixes for 5.7.

The following changes since commit 59e7a8cc2dcf335116d500d684bfb34d1d97a6fe:

  Merge tag 'drm-msm-next-2020-03-22' of https://gitlab.freedesktop.org/drm/msm 
into drm-next (2020-03-31 16:34:55 +1000)

are available in the Git repository at:

  git://people.freedesktop.org/~agd5f/linux tags/amd-drm-next-5.7-2020-04-01

for you to fetch changes up to 3148a6a0ef3cf93570f30a477292768f7eb5d3c3:

  drm/amdkfd: kfree the wrong pointer (2020-04-01 14:44:22 -0400)


amd-drm-next-5.7-2020-04-01:

amdgpu:
- HDCP fixes
- gfx10 fix
- Misc display fixes
- BACO fixes

amdkfd:
- Fix memory leak


Aric Cyr (1):
  drm/amd/display: LFC not working on 2.0x range monitors (v2)

Bhawanpreet Lakha (3):
  drm/amd/display: Don't try hdcp1.4 when content_type is set to type1
  drm/amd/display: Correctly cancel future watchdog and callback events
  drm/amd/display: increase HDCP authentication delay

Dmytro Laktyushkin (1):
  drm/amd/display: Fix dcn21 num_states

Eric Bernstein (1):
  drm/amd/display: Update function to get optimal number of taps

Evan Quan (2):
  drm/amd/powerplay: drop redundant BIF doorbell interrupt operations
  drm/amd/powerplay: move the ASIC specific nbio operation out of 
smu_v11_0.c

Isabel Zhang (1):
  drm/amd/display: Revert change to HDCP display states

Jack Zhang (1):
  drm/amdkfd: kfree the wrong pointer

Kevin Wang (1):
  drm/amdgpu: fix hpd bo size calculation error

Nicholas Kazlauskas (1):
  drm/amd/display: Use double buffered DRR timing update by default

Stylon Wang (3):
  drm/amd/display: Support P010 pixel format
  drm/amd/display: Support plane level CTM
  drm/amd/display: Enable BT2020 in COLOR_ENCODING property

Yongqiang Sun (1):
  drm/amd/display: Not doing optimize bandwidth if flip pending.

 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  |  2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_device.c|  4 +--
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  | 12 +--
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 14 +---
 drivers/gpu/drm/amd/display/dc/core/dc.c   | 33 +-
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c  |  1 +
 drivers/gpu/drm/amd/display/dc/dc.h|  3 ++
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  |  6 
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c  | 18 ++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h  |  3 ++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  |  3 +-
 .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c  |  5 +--
 .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c  | 40 --
 drivers/gpu/drm/amd/display/dc/dml/dc_features.h   |  2 +-
 .../drm/amd/display/dc/dml/display_mode_structs.h  |  7 ++--
 .../drm/amd/display/modules/freesync/freesync.c| 34 ++
 drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c|  5 ++-
 drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h| 28 +++
 .../drm/amd/display/modules/hdcp/hdcp1_execution.c |  2 +-
 .../drm/amd/display/modules/hdcp/hdcp2_execution.c |  2 +-
 .../gpu/drm/amd/display/modules/hdcp/hdcp_psp.c| 39 -
 drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h |  1 +
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c   | 15 +++-
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 18 --
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 24 +
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 14 +++-
 27 files changed, 223 insertions(+), 114 deletions(-)


Re: [PATCH 0/4] drm/dp_mst: Remove ->destroy_connector() callback

2020-04-01 Thread Alex Deucher
On Tue, Mar 31, 2020 at 4:58 PM Lyude Paul  wrote:
>
> This finishes up the work that Pankaj Bharadiya started in:
>
> https://patchwork.freedesktop.org/series/74412/
>
> And allows us to entirely remove ->destroy_connector()
>
> Lyude Paul (4):
>   drm/amd/amdgpu_dm/mst: Remove unneeded edid assignment when destroying
> connectors
>   drm/amd/amdgpu_dm/mst: Remove ->destroy_connector() callback
>   drm/amd/amdgpu_dm/mst: Stop printing extra messages in
> dm_dp_add_mst_connector()
>   drm/dp_mst: Remove drm_dp_mst_topology_cbs.destroy_connector

I noticed this as well when I was sorting out the load and unload
callback removal.  Thanks for finishing this up.  This series looks
good to me, assuming none of the display folks have any concerns:
Reviewed-by: Alex Deucher 


>
>  .../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 45 +--
>  drivers/gpu/drm/drm_dp_mst_topology.c | 16 ++-
>  include/drm/drm_dp_mst_helper.h   |  2 -
>  3 files changed, 15 insertions(+), 48 deletions(-)
>
> --
> 2.25.1
>


Re: [PATCH 2/2] drm/amd/dc: Kill dc_conn_log_hex_linux()

2020-04-01 Thread Alex Deucher
On Wed, Apr 1, 2020 at 9:00 AM Kazlauskas, Nicholas
 wrote:
>
> On 2020-03-31 5:22 p.m., Lyude Paul wrote:
> > DRM already supports tracing DPCD transactions, there's no reason for
> > the existence of this function. Also, it prints one byte per-line which
> > is way too loud. So, just remove it.
> >
> > Signed-off-by: Lyude Paul 
>
> Thanks for helping clean this up!
>
> Series is:
>
> Reviewed-by: Nicholas Kazlauskas 


Applied the series.  Thanks!

Alex

>
> Regards,
> Nicholas Kazlauskas
>
> > ---
> >   .../gpu/drm/amd/display/dc/basics/Makefile|  3 +-
> >   .../drm/amd/display/dc/basics/log_helpers.c   | 39 ---
> >   .../amd/display/include/logger_interface.h|  4 --
> >   3 files changed, 1 insertion(+), 45 deletions(-)
> >   delete mode 100644 drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
> >
> > diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile 
> > b/drivers/gpu/drm/amd/display/dc/basics/Makefile
> > index 7ad0cad0f4ef..01b99e0d788e 100644
> > --- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
> > +++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
> > @@ -24,8 +24,7 @@
> >   # It provides the general basic services required by other DAL
> >   # subcomponents.
> >
> > -BASICS = conversion.o fixpt31_32.o \
> > - log_helpers.o vector.o dc_common.o
> > +BASICS = conversion.o fixpt31_32.o vector.o dc_common.o
> >
> >   AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
> >
> > diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c 
> > b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
> > deleted file mode 100644
> > index 26583f346c39..
> > --- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
> > +++ /dev/null
> > @@ -1,39 +0,0 @@
> > -/*
> > - * Copyright 2012-16 Advanced Micro Devices, Inc.
> > - *
> > - * Permission is hereby granted, free of charge, to any person obtaining a
> > - * copy of this software and associated documentation files (the 
> > "Software"),
> > - * to deal in the Software without restriction, including without 
> > limitation
> > - * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> > - * and/or sell copies of the Software, and to permit persons to whom the
> > - * Software is furnished to do so, subject to the following conditions:
> > - *
> > - * The above copyright notice and this permission notice shall be included 
> > in
> > - * all copies or substantial portions of the Software.
> > - *
> > - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
> > OR
> > - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> > - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> > - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> > - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> > - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> > - * OTHER DEALINGS IN THE SOFTWARE.
> > - *
> > - * Authors: AMD
> > - *
> > - */
> > -
> > -#include "core_types.h"
> > -#include "logger.h"
> > -#include "include/logger_interface.h"
> > -#include "dm_helpers.h"
> > -
> > -void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
> > -{
> > - int i;
> > -
> > - if (hex_data)
> > - for (i = 0; i < hex_data_count; i++)
> > - DC_LOG_DEBUG("%2.2X ", hex_data[i]);
> > -}
> > -
> > diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h 
> > b/drivers/gpu/drm/amd/display/include/logger_interface.h
> > index 6e008de25629..02c23b04d34b 100644
> > --- a/drivers/gpu/drm/amd/display/include/logger_interface.h
> > +++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
> > @@ -40,8 +40,6 @@ struct dc_state;
> >*
> >*/
> >
> > -void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
> > -
> >   void pre_surface_trace(
> >   struct dc *dc,
> >   const struct dc_plane_state *const *plane_states,
> > @@ -102,14 +100,12 @@ void context_clock_trace(
> >   #define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
> >   do { \
> >   (void)(link); \
> > - dc_conn_log_hex_linux(hex_data, hex_len); \
> >   DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
> >   } while (0)
> >
> >   #define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
> >   do { \
> >   (void)(link); \
> > - dc_conn_log_hex_linux(hex_data, hex_len); \
> >   DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
> >   } while (0)
> >
> >
>

Re: [PATCH][next] drm/amdgpu/vcn: fix spelling mistake "fimware" -> "firmware"

2020-04-01 Thread Alex Deucher
On Wed, Apr 1, 2020 at 12:35 PM Colin King  wrote:
>
> From: Colin Ian King 
>
> There is a spelling mistake in a dev_err error message. Fix it.
>
> Signed-off-by: Colin Ian King 

Applied.  thanks!

Alex

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> index 328b6ceb80de..d653a18dcbc3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> @@ -187,7 +187,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
> PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 
> >vcn.inst[i].fw_shared_bo,
> >vcn.inst[i].fw_shared_gpu_addr, 
> >vcn.inst[i].fw_shared_cpu_addr);
> if (r) {
> -   dev_err(adev->dev, "VCN %d (%d) failed to allocate 
> fimware shared bo\n", i, r);
> +   dev_err(adev->dev, "VCN %d (%d) failed to allocate 
> firmware shared bo\n", i, r);
> return r;
> }
> }
> --
> 2.25.1
>
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH][next] drm/amdgpu/vcn: fix spelling mistake "fimware" -> "firmware"

2020-04-01 Thread Colin King
From: Colin Ian King 

There is a spelling mistake in a dev_err error message. Fix it.

Signed-off-by: Colin Ian King 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 328b6ceb80de..d653a18dcbc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -187,7 +187,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
&adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
if (r) {
-   dev_err(adev->dev, "VCN %d (%d) failed to allocate 
fimware shared bo\n", i, r);
+   dev_err(adev->dev, "VCN %d (%d) failed to allocate 
firmware shared bo\n", i, r);
return r;
}
}
-- 
2.25.1



RE: [PATCH 1/6] dma-buf: add peer2peer flag

2020-04-01 Thread Ruhl, Michael J
>-Original Message-
>From: dri-devel  On Behalf Of
>Daniel Vetter
>Sent: Wednesday, April 1, 2020 7:35 AM
>To: Christian König 
>Cc: amd-gfx@lists.freedesktop.org; dri-de...@lists.freedesktop.org
>Subject: Re: [PATCH 1/6] dma-buf: add peer2peer flag
>
>On Mon, Mar 30, 2020 at 03:55:31PM +0200, Christian König wrote:
>> Add a peer2peer flag noting that the importer can deal with device
>> resources which are not backed by pages.
>>
>> Signed-off-by: Christian König 
>> ---
>>  drivers/dma-buf/dma-buf.c |  2 ++
>>  include/linux/dma-buf.h   | 10 ++
>>  2 files changed, 12 insertions(+)
>>
>> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
>> index ccc9eda1bc28..570c923023e6 100644
>> --- a/drivers/dma-buf/dma-buf.c
>> +++ b/drivers/dma-buf/dma-buf.c
>> @@ -690,6 +690,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf,
>struct device *dev,
>>
>>  attach->dev = dev;
>>  attach->dmabuf = dmabuf;
>> +if (importer_ops)
>> +attach->peer2peer = importer_ops->allow_peer2peer;
>
>So an idea that crossed my mind to validate this, since we need quite some
>bad amounts of bad luck if someone accidentally introduces and access to
>struct_page in sg lists in some slowpath.
>
>On map_sg, if ->peer2peer is set, we could mangle the struct_page
>pointers, e.g. swap high bits for low bits (so that NULL stays NULL). On
>unmap_sg we obviously need to undo that, in case the exporter needs those
>pointers for its own book-keeping for some reason. I was also pondering
>just setting them all to NULL, but that might break some exporters. With
>the pointer mangling trick (especially if we flip high for low bits on 64
>where this should result in invalid addresses in almost all cases) we
>should be able to catch buggy p2p importers quite quickly.

The scatter list usage of the struct page pointer has other information in the
lower bits for keeping track of linking and other stuff.  Swizzling the page
pointers will probably make the scatter list unusable.
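
For reference, the lower bits in question: struct scatterlist keeps the page
pointer and the chain/end markers in a single unsigned long, roughly as
follows (condensed from include/linux/scatterlist.h; the exact layout varies
with config and kernel version):

	struct scatterlist {
		unsigned long	page_link;	/* page pointer | SG_CHAIN | SG_END */
		unsigned int	offset;
		unsigned int	length;
		/* dma_address etc. omitted */
	};

	/* sg_page() strips the low two flag bits before using the pointer,
	 * so arbitrarily swizzled pointers would corrupt the chain/end state.
	 */
	#define SG_CHAIN	0x01UL
	#define SG_END		0x02UL
	static inline struct page *sg_page(struct scatterlist *sg)
	{
		return (struct page *)(sg->page_link & ~(SG_CHAIN | SG_END));
	}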

Mike

>Thoughts? Maybe add as a follow-up patch for testing?
>-Daniel
>>  attach->importer_ops = importer_ops;
>>  attach->importer_priv = importer_priv;
>>
>> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
>> index 1ade486fc2bb..82e0a4a64601 100644
>> --- a/include/linux/dma-buf.h
>> +++ b/include/linux/dma-buf.h
>> @@ -334,6 +334,14 @@ struct dma_buf {
>>   * Attachment operations implemented by the importer.
>>   */
>>  struct dma_buf_attach_ops {
>> +/**
>> + * @allow_peer2peer:
>> + *
>> + * If this is set to true the importer must be able to handle peer
>> + * resources without struct pages.
>> + */
>> +bool allow_peer2peer;
>> +
>>  /**
>>   * @move_notify
>>   *
>> @@ -362,6 +370,7 @@ struct dma_buf_attach_ops {
>>   * @node: list of dma_buf_attachment, protected by dma_resv lock of the
>dmabuf.
>>   * @sgt: cached mapping.
>>   * @dir: direction of cached mapping.
>> + * @peer2peer: true if the importer can handle peer resources without
>pages.
>>   * @priv: exporter specific attachment data.
>>   * @importer_ops: importer operations for this attachment, if provided
>>   * dma_buf_map/unmap_attachment() must be called with the dma_resv
>lock held.
>> @@ -382,6 +391,7 @@ struct dma_buf_attachment {
>>  struct list_head node;
>>  struct sg_table *sgt;
>>  enum dma_data_direction dir;
>> +bool peer2peer;
>>  const struct dma_buf_attach_ops *importer_ops;
>>  void *importer_priv;
>>  void *priv;
>> --
>> 2.17.1
>>
>
>--
>Daniel Vetter
>Software Engineer, Intel Corporation
>http://blog.ffwll.ch


Re: [PATCH v3 1/1] drm/amdgpu: rework sched_list generation

2020-04-01 Thread Nirmoy


On 4/1/20 5:02 PM, Luben Tuikov wrote:

On 2020-03-31 08:46, Nirmoy wrote:

On 3/31/20 3:01 AM, Luben Tuikov wrote:

This patch seems to be using DOS line-endings.


Strange, I don't see that in my local patch file.


Not sure why "git am" complained about DOS endings
the first time I downloaded it. Second time was fine.

[snip]>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index 29f0a410091b..27abbdc603dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -721,6 +721,11 @@ struct amd_powerplay {
const struct amd_pm_funcs *pp_funcs;
   };

+struct amdgpu_sched {
+   uint32_tnum_scheds;
+   struct drm_gpu_scheduler*sched[HWIP_MAX_INSTANCE];
+};
+
   #define AMDGPU_RESET_MAGIC_NUM 64
   #define AMDGPU_MAX_DF_PERFMONS 4
   struct amdgpu_device {
@@ -858,6 +863,8 @@ struct amdgpu_device {
struct amdgpu_ring  *rings[AMDGPU_MAX_RINGS];
boolib_pool_ready;
struct amdgpu_sa_managerring_tmp_bo[AMDGPU_IB_POOL_MAX];
+   /* drm scheduler list */
+   struct amdgpu_sched 
gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];

That's a 2-dimensional array of "struct amdgpu_sched".
I think that the comment should be removed, or at least
not say "drm scheduler list". (I can see the structure
definition above.)


Yes I should remove it.



If this is the path you want to go, consider removing
"num_scheds" and creating a three dimensional array,
which would really essentialize the direction you want
to go:

struct drm_gpu_scheduler 
*gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX][HWIP_MAX_INSTANCE];

Now that this architecture is stripped down to its essentials,
perhaps we can see some optimizations...?


If you mean whether we should see any performance improvement, then IMO we
may not see much difference, as we use pretty much the same number of memory
accesses, and we now have the extra cost of array_index_nospec().

Also, this is not a hot code path: we do only one
amdgpu_ctx_init_entity() per HW IP per context.

No, this has nothing to do with "performance".
It's all about architecture and design.

You seem to have array-array-struct with array and int,
and it seems much cleaner to just have array-array-array.
I think you don't need to break the chain with
struct of int and array, just as I described
in my comment below which you snipped without addressing it.

If you're not going to address a comment, don't delete it, leave it
for others to see that it hasn't been addressed. Only
snip previously addressed and resolved comments and previously
seen patch info, like diffstat/etc.



I wanted to understand "running pointer" before I could comment on it there.





Also consider that since you're creating an array of pointers,
you don't necessarily need to know their count. Your hot-path
algorithms should not need to know it. If you need to print
their count, say in sysfs, then you can count them up on
behalf of the user-space process cat-ing the sysfs entry.


[snip]


+
+   /* set to default prio if sched_list is NULL */
+   if (!adev->gpu_sched[hw_ip][hw_prio].num_scheds)
+   hw_prio = AMDGPU_RING_PRIO_DEFAULT;

That comment is a bit confusing--it talks about a list
not being NULL, while the conditional implicitly checks
against 0.


Yes, this is wrong, will remove it.




I wish you hadn't snipped my comment here, but address it
one way or the other. It is:

I'd much rather that integer comparison be performed against
integers, as opposed to using logical NOT operator (which is
implicit in comparing against 0), i.e.,

if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)

Also, architecturally, there seems to be informational
redundancy, in keeping an integer count and list of
objects at the same time, as the above if-conditional
exposes: the comment talks about a list and NULL but
the if-conditional implicitly checks for 0.



The number of valid drm schedulers in adev->gpu_sched[hw_ip][hw_prio].sched 
will vary depending on priority and hw_ip.


We need to pass that scheduler array and num_scheds to 
drm_sched_entity_init(). I think we often use an


array and an integer count together when the number of valid items in the 
array is dynamic.
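
For reference, the consumer of both looks roughly like this at the time of
this thread (only the drm_sched_entity_init() signature is taken as-is; the
entity/ctx/hw_prio names around the call are assumptions):

	/* Sketch: the entity takes the scheduler array plus the number of
	 * valid entries in it, which is why the count travels with it. */
	r = drm_sched_entity_init(&entity->entity, priority,
				  adev->gpu_sched[hw_ip][hw_prio].sched,
				  adev->gpu_sched[hw_ip][hw_prio].num_scheds,
				  &ctx->guilty);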





Perhaps, we don't need "num_scheds" and you can just
check if the index is NULL and assign PRIO_DEFAULT.


@@ -258,6 +272,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct 
amdgpu_ring *ring,
ring->priority = DRM_SCHED_PRIORITY_NORMAL;
	mutex_init(&ring->priority_mutex);

+   if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
+   hw_ip = amdgpu_ring_type_to_drm_hw_ip[ring->funcs->type];
+   num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+   adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] = 
+   &ring->sched;
+   }
This seems unnecessarily complicated. Perhaps we can remove
"num_scheds", as 

Re: [PATCH 5/5] drm/amdgpu: utilize subconnector property for DP through DisplayManager

2020-04-01 Thread Alex Deucher
On Wed, Apr 1, 2020 at 8:17 AM Jeevan B  wrote:
>
> Since DP-specific information is stored in the driver's structures, every
> driver needs to implement the subconnector property by itself. Display
> Core already has the subconnector information; we only need to
> expose it through a DRM property.
>
> Signed-off-by: Oleg Vasilev 
> Tested-by: Oleg Vasilev 
> Cc: Alex Deucher 
> Cc: Christian König 
> Cc: David (ChunMing) Zhou 
> Cc: amd-gfx@lists.freedesktop.org
> Signed-off-by: Jeevan B 
> Link: 
> https://patchwork.freedesktop.org/patch/msgid/20190829114854.1539-7-oleg.vasi...@intel.com

Is this just a resend or a new version?  If it's new can you send out
the entire series to the mailing list instead of individual
developers?

Also, what are the subconnectors used for?  Just informational?

Alex

> ---
>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  | 41 
> +-
>  .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c|  3 ++
>  2 files changed, 43 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index d3674d8..91c0ef2 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -121,6 +121,42 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
>  static int amdgpu_dm_init(struct amdgpu_device *adev);
>  static void amdgpu_dm_fini(struct amdgpu_device *adev);
>
> +static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
> +{
> +   switch (link->dpcd_caps.dongle_type) {
> +   case DISPLAY_DONGLE_NONE:
> +   return DRM_MODE_SUBCONNECTOR_Native;
> +   case DISPLAY_DONGLE_DP_VGA_CONVERTER:
> +   return DRM_MODE_SUBCONNECTOR_VGA;
> +   case DISPLAY_DONGLE_DP_DVI_CONVERTER:
> +   case DISPLAY_DONGLE_DP_DVI_DONGLE:
> +   return DRM_MODE_SUBCONNECTOR_DVID;
> +   case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
> +   case DISPLAY_DONGLE_DP_HDMI_DONGLE:
> +   return DRM_MODE_SUBCONNECTOR_HDMIA;
> +   case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
> +   default:
> +   return DRM_MODE_SUBCONNECTOR_Unknown;
> +   }
> +}
> +
> +static void update_subconnector_property(struct amdgpu_dm_connector 
> *aconnector)
> +{
> +   struct dc_link *link = aconnector->dc_link;
> +   struct drm_connector *connector = &aconnector->base;
> +   enum drm_mode_subconnector subconnector = 
> DRM_MODE_SUBCONNECTOR_Unknown;
> +
> +   if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
> +   return;
> +
> +   if (aconnector->dc_sink)
> +   subconnector = get_subconnector_type(link);
> +
> +   drm_object_property_set_value(&connector->base,
> +   connector->dev->mode_config.dp_subconnector_property,
> +   subconnector);
> +}
> +
>  /*
>   * initializes drm_device display related structures, based on the 
> information
>   * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
> @@ -1917,7 +1953,6 @@ void amdgpu_dm_update_connector_after_detect(
> if (aconnector->mst_mgr.mst_state == true)
> return;
>
> -
> sink = aconnector->dc_link->local_sink;
> if (sink)
> dc_sink_retain(sink);
> @@ -2038,6 +2073,8 @@ void amdgpu_dm_update_connector_after_detect(
>
> mutex_unlock(&dev->mode_config.mutex);
>
> +   update_subconnector_property(aconnector);
> +
> if (sink)
> dc_sink_release(sink);
>  }
> @@ -4518,6 +4555,8 @@ amdgpu_dm_connector_detect(struct drm_connector 
> *connector, bool force)
> else
> connected = (aconnector->base.force == DRM_FORCE_ON);
>
> +   update_subconnector_property(aconnector);
> +
> return (connected ? connector_status_connected :
> connector_status_disconnected);
>  }
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> index e8208df..dab81a6 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> @@ -26,6 +26,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include "dm_services.h"
>  #include "amdgpu.h"
>  #include "amdgpu_dm.h"
> @@ -485,6 +486,8 @@ void amdgpu_dm_initialize_dp_connector(struct 
> amdgpu_display_manager *dm,
> 16,
> 4,
> aconnector->connector_id);
> +
> +   drm_mode_add_dp_subconnector_property(&aconnector->base);
>  }
>
>  int dm_mst_get_pbn_divider(struct dc_link *link)
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org

Re: [PATCH v3 1/1] drm/amdgpu: rework sched_list generation

2020-04-01 Thread Luben Tuikov
On 2020-03-31 08:46, Nirmoy wrote:
> 
> On 3/31/20 3:01 AM, Luben Tuikov wrote:
>> This patch seems to be using DOS line-endings.
> 
> 
> Strange, I don't see that in my local patch file.
> 

Not sure why "git am" complained about DOS endings
the first time I downloaded it. Second time was fine.

[snip]>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> index 29f0a410091b..27abbdc603dd 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> @@ -721,6 +721,11 @@ struct amd_powerplay {
>>> const struct amd_pm_funcs *pp_funcs;
>>>   };
>>>
>>> +struct amdgpu_sched {
>>> +   uint32_tnum_scheds;
>>> +   struct drm_gpu_scheduler*sched[HWIP_MAX_INSTANCE];
>>> +};
>>> +
>>>   #define AMDGPU_RESET_MAGIC_NUM 64
>>>   #define AMDGPU_MAX_DF_PERFMONS 4
>>>   struct amdgpu_device {
>>> @@ -858,6 +863,8 @@ struct amdgpu_device {
>>> struct amdgpu_ring  *rings[AMDGPU_MAX_RINGS];
>>> boolib_pool_ready;
>>> struct amdgpu_sa_managerring_tmp_bo[AMDGPU_IB_POOL_MAX];
>>> +   /* drm scheduler list */
>>> +   struct amdgpu_sched 
>>> gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
>> That's a 2-dimensional array of "struct amdgpu_sched".
>> I think that the comment should be removed, or at least
>> not say "drm scheduler list". (I can see the structure
>> definition above.)
> 
> 
> Yes I should remove it.
> 
> 
>> If this is the path you want to go, consider removing
>> "num_scheds" and creating a three dimensional array,
>> which would really essentialize the direction you want
>> to go:
>>
>> struct drm_gpu_scheduler 
>> *gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX][HWIP_MAX_INSTANCE];
>>
>> Now that this architecture is stripped down to its essentials,
>> perhaps we can see some optimizations...?
> 
> 
> If you mean whether we should see any performance improvement, then IMO 
> we may not see much
> 
> difference, as we are using pretty much the same number of memory accesses, 
> plus now we have the extra cost of array_index_nospec().
> 
> Also, this is not a hot code path. We do only 1 
> amdgpu_ctx_init_entity() per HW_IP per context.

No, this has nothing to do with "performance".
It's all about architecture and design.

You seem to have array-array-struct with array and int,
and it seems much cleaner to just have array-array-array.
I think you don't need to break the chain with
struct of int and array, just as I described
in my comment below which you snipped without addressing it.

If you're not going to address a comment, don't delete it, leave it
for others to see that it hasn't been addressed. Only
snip previously addressed and resolved comments and previously
seen patch info, like diffstat/etc.

>> Also consider that since you're creating an array of pointers,
>> you don't necessarily need to know their count. Your hot-path
>> algorithms should not need to know it. If you need to print
>> their count, say in sysfs, then you can count them up on
>> behalf of the user-space process cat-ing the sysfs entry.
>>

[snip]

>>> +
>>> +   /* set to default prio if sched_list is NULL */
>>> +   if (!adev->gpu_sched[hw_ip][hw_prio].num_scheds)
>>> +   hw_prio = AMDGPU_RING_PRIO_DEFAULT;
>> That comment is a bit confusing--it talks about a list
>> not being NULL, while the conditional implicitly checks
>> against 0.
> 
> 
> Yes, this is wrong, will remove it.
> 
> 
> 

I wish you hadn't snipped my comment here, but address it
one way or the other. It is:

I'd much rather that integer comparison be performed against
integers, as opposed to using logical NOT operator (which is
implicit in comparing against 0), i.e.,

if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)

Also, architecturally, there seems to be informational
redundancy, in keeping an integer count and list of
objects at the same time, as the above if-conditional
exposes: the comment talks about a list and NULL but
the if-conditional implicitly checks for 0.

Perhaps, we don't need "num_scheds" and you can just
check if the index is NULL and assign PRIO_DEFAULT.
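
A minimal sketch of that NULL-check approach, assuming the three-dimensional
pointer array suggested earlier (illustration only, not code from this
thread):

	/* Fall back to the default priority when no scheduler was
	 * registered for the requested one. */
	if (!adev->gpu_sched[hw_ip][hw_prio][0])
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
	scheds = adev->gpu_sched[hw_ip][hw_prio];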

>> @@ -258,6 +272,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct 
>> amdgpu_ring *ring,
>>  ring->priority = DRM_SCHED_PRIORITY_NORMAL;
>>  mutex_init(&ring->priority_mutex);
>>
>> +if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
>> +hw_ip = amdgpu_ring_type_to_drm_hw_ip[ring->funcs->type];
>> +num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
>> +adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] = 
>> +&ring->sched;
>> +}
>> This seems unnecessarily complicated. Perhaps we can remove
>> "num_scheds", as recommended above, and keep a running pointer
>> while initialization is being done...?
> 
> 
> What do you mean by running pointer ?

A "running pointer" is a local pointer you're using temporarily
to traverse memory. If you remove the "num_scheds", as noted 
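
A rough sketch of that idea during ring init, assuming the three-dimensional
array variant and field names from the quoted diff (illustration only):

	/* Sketch: append this ring's scheduler by walking a local
	 * "running" pointer to the first free slot instead of keeping
	 * a num_scheds counter; assumes the array is zero-initialized
	 * and has a free slot. */
	struct drm_gpu_scheduler **slot = adev->gpu_sched[hw_ip][hw_prio];

	while (*slot)
		slot++;
	*slot = &ring->sched;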

Re: [PATCH] drm/amdkfd: Provide SMI events watch

2020-04-01 Thread Amber Lin
Thanks Felix for the review. I have a better understanding of how kfifo 
works now and have changed my code quite a bit. Couple of questions 
below inline regarding the gpu_id and data arguments.


Thanks.

Amber

On 2020-03-26 4:53 p.m., Felix Kuehling wrote:


Hi Amber,

I see that this is based on the debugger event code. Jon and I are 
just working through some issues with that code. The lessons from that 
will need to be applied to this as well. But I think we can define 
your API to simplify this a bit.


The basic problem is, that we have one Fifo in the kfd_device, but 
potentially multiple file descriptors referring to it. For the event 
interface I think we can enforce only a single file descriptor per 
device. If there is already one, your register call can fail. See more 
comments inline.
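
A minimal sketch of that enforcement, with invented field names (not from
the patch), just to illustrate the idea:

	/* Allow only one SMI event file descriptor per device. */
	mutex_lock(&dev->smi_lock);		/* assumed lock */
	if (dev->smi_registered) {		/* assumed flag */
		mutex_unlock(&dev->smi_lock);
		return -EBUSY;
	}
	dev->smi_registered = true;
	mutex_unlock(&dev->smi_lock);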


On 2020-03-17 13:57, Amber Lin wrote:

When the compute is malfunctioning or performance drops, the system admin
will use SMI (System Management Interface) tool to monitor/diagnostic what
went wrong. This patch provides an event watch interface for the user
space to register events they are interested. After the event is
registered, the user can use annoymous file descriptor's pull function


pull -> poll


Thank you for spotting the typo. I’ll change that.


with wait-time specified to wait for the event to happen. Once the event
happens, the user can use read() to retrieve information related to the
event.

VM fault event is done in this patch.
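
As a rough illustration of the intended user-space flow (struct and op names
are from this patch; the ioctl name, the event mask value and the way the
anonymous fd is returned are assumptions):

	struct kfd_ioctl_smi_events_args args = {
		.gpu_id = gpu_id,
		.op = KFD_SMI_EVENTS_REGISTER,
		.events = 1,			/* e.g. VM fault bit, assumed */
	};
	/* assumed: the anonymous fd comes back as the ioctl result */
	int fd = ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN)) {
		char buf[128];

		read(fd, buf, sizeof(buf));	/* event details */
	}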

Signed-off-by: Amber Lin
---
  drivers/gpu/drm/amd/amdkfd/Makefile  |   3 +-
  drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c |   2 +
  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c |  38 ++
  drivers/gpu/drm/amd/amdkfd/kfd_device.c  |   1 +
  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c  |   2 +
  drivers/gpu/drm/amd/amdkfd/kfd_priv.h|  10 ++
  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c  | 143 +++
  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h  |  41 +++
  include/uapi/linux/kfd_ioctl.h   |  27 -
  9 files changed, 265 insertions(+), 2 deletions(-)
  create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
  create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h

diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile 
b/drivers/gpu/drm/amd/amdkfd/Makefile
index 6147462..cc98b4a 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -53,7 +53,8 @@ AMDKFD_FILES  := $(AMDKFD_PATH)/kfd_module.o \
$(AMDKFD_PATH)/kfd_int_process_v9.o \
$(AMDKFD_PATH)/kfd_dbgdev.o \
$(AMDKFD_PATH)/kfd_dbgmgr.o \
-   $(AMDKFD_PATH)/kfd_crat.o
+   $(AMDKFD_PATH)/kfd_crat.o \
+   $(AMDKFD_PATH)/kfd_smi_events.o
  
  ifneq ($(CONFIG_AMD_IOMMU_V2),)

  AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c 
b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 9f59ba9..24b4717 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -24,6 +24,7 @@
  #include "kfd_events.h"
  #include "cik_int.h"
  #include "amdgpu_amdkfd.h"
+#include "kfd_smi_events.h"
  
  static bool cik_event_interrupt_isr(struct kfd_dev *dev,

const uint32_t *ih_ring_entry,
@@ -107,6 +108,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
struct kfd_vm_fault_info info;
  
+		kfd_smi_event_update_vmfault(dev, pasid);

kfd_process_vm_fault(dev->dqm, pasid);
  
		memset(&info, 0, sizeof(info));

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f8fa03a..8e92956 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -39,6 +39,7 @@
  #include "kfd_device_queue_manager.h"
  #include "kfd_dbgmgr.h"
  #include "amdgpu_amdkfd.h"
+#include "kfd_smi_events.h"
  
  static long kfd_ioctl(struct file *, unsigned int, unsigned long);

  static int kfd_open(struct inode *, struct file *);
@@ -1243,6 +1244,40 @@ static int kfd_ioctl_acquire_vm(struct file *filep, 
struct kfd_process *p,
return ret;
  }
  
+/* Handle requests for watching SMI events */

+static int kfd_ioctl_smi_events(struct file *filep,
+   struct kfd_process *p, void *data)
+{
+   struct kfd_ioctl_smi_events_args *args = data;
+   struct kfd_dev *dev;
+   int ret = 0;
+
+   dev = kfd_device_by_id(args->gpu_id);
+   if (!dev)
+   return -EINVAL;
+
+   switch (args->op) {
+   case KFD_SMI_EVENTS_REGISTER:
+   ret = kfd_smi_event_register(dev, args->events);
+   if (ret >= 0) {
+   /* When the registration is successful, it returns the
+* annoymous 

Re: [PATCH 2/2] drm/amd/dc: Kill dc_conn_log_hex_linux()

2020-04-01 Thread Kazlauskas, Nicholas

On 2020-03-31 5:22 p.m., Lyude Paul wrote:

DRM already supports tracing DPCD transactions, there's no reason for
the existence of this function. Also, it prints one byte per-line which
is way too loud. So, just remove it.

Signed-off-by: Lyude Paul 


Thanks for helping clean this up!

Series is:

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  .../gpu/drm/amd/display/dc/basics/Makefile|  3 +-
  .../drm/amd/display/dc/basics/log_helpers.c   | 39 ---
  .../amd/display/include/logger_interface.h|  4 --
  3 files changed, 1 insertion(+), 45 deletions(-)
  delete mode 100644 drivers/gpu/drm/amd/display/dc/basics/log_helpers.c

diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile 
b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index 7ad0cad0f4ef..01b99e0d788e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -24,8 +24,7 @@
  # It provides the general basic services required by other DAL
  # subcomponents.
  
-BASICS = conversion.o fixpt31_32.o \

-   log_helpers.o vector.o dc_common.o
+BASICS = conversion.o fixpt31_32.o vector.o dc_common.o
  
  AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
  
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c

deleted file mode 100644
index 26583f346c39..
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "core_types.h"
-#include "logger.h"
-#include "include/logger_interface.h"
-#include "dm_helpers.h"
-
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
-{
-   int i;
-
-   if (hex_data)
-   for (i = 0; i < hex_data_count; i++)
-   DC_LOG_DEBUG("%2.2X ", hex_data[i]);
-}
-
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h 
b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 6e008de25629..02c23b04d34b 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,8 +40,6 @@ struct dc_state;
   *
   */
  
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);

-
  void pre_surface_trace(
struct dc *dc,
const struct dc_plane_state *const *plane_states,
@@ -102,14 +100,12 @@ void context_clock_trace(
  #define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
do { \
(void)(link); \
-   dc_conn_log_hex_linux(hex_data, hex_len); \
DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
} while (0)
  
  #define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \

do { \
(void)(link); \
-   dc_conn_log_hex_linux(hex_data, hex_len); \
DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
} while (0)
  



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/display: Guard calls to hdcp_ta and dtm_ta

2020-04-01 Thread Kazlauskas, Nicholas

On 2020-03-31 2:08 p.m., Lakha, Bhawanpreet wrote:

[AMD Official Use Only - Internal Distribution Only]


mod_hdcp_hdcp2_get_link_encryption_status() isn't being used, should 
probably remove it in a followup patch


I still think it's better if you add it to the function for consistency.

Feel free to fix that bit up before you merge if you want, either way 
this patch is:


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas



*From:* Kazlauskas, Nicholas 
*Sent:* March 31, 2020 2:03 PM
*To:* Alex Deucher ; Lakha, Bhawanpreet 

*Cc:* Deucher, Alexander ; amd-gfx list 
; Zhang, Hawking 

*Subject:* Re: [PATCH] drm/amd/display: Guard calls to hdcp_ta and dtm_ta
On 2020-03-31 1:37 p.m., Alex Deucher wrote:

On Mon, Mar 30, 2020 at 6:36 PM Bhawanpreet Lakha
 wrote:


[Why]
The buffer used when calling psp is a shared buffer. If we have multiple calls
at the same time we can overwrite the buffer.

[How]
Add mutex to guard the shared buffer.

Signed-off-by: Bhawanpreet Lakha 


Acked-by: Alex Deucher 


One comment inline:




---
   drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c   |   2 +
   drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h   |   2 +
   .../drm/amd/display/modules/hdcp/hdcp_psp.c   | 420 +++---
   3 files changed, 257 insertions(+), 167 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index dbaeffc4431e..9d587bc27663 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -888,6 +888,7 @@ static int psp_hdcp_load(struct psp_context *psp)
  if (!ret) {
  psp->hdcp_context.hdcp_initialized = true;
  psp->hdcp_context.session_id = cmd->resp.session_id;
+   mutex_init(&psp->hdcp_context.mutex);
  }

  kfree(cmd);
@@ -1033,6 +1034,7 @@ static int psp_dtm_load(struct psp_context *psp)
  if (!ret) {
  psp->dtm_context.dtm_initialized = true;
  psp->dtm_context.session_id = cmd->resp.session_id;
+   mutex_init(&psp->dtm_context.mutex);
  }

  kfree(cmd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 297435c0c7c1..6a717fd5efc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -161,6 +161,7 @@ struct psp_hdcp_context {
  struct amdgpu_bo    *hdcp_shared_bo;
  uint64_t    hdcp_shared_mc_addr;
  void    *hdcp_shared_buf;
+   struct mutex    mutex;
   };

   struct psp_dtm_context {
@@ -169,6 +170,7 @@ struct psp_dtm_context {
  struct amdgpu_bo    *dtm_shared_bo;
  uint64_t    dtm_shared_mc_addr;
  void    *dtm_shared_buf;
+   struct mutex    mutex;
   };

   #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c 
b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index c2929815c3ee..aa147e171557 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -51,12 +51,15 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
  struct ta_dtm_shared_memory *dtm_cmd;
  struct mod_hdcp_display *display =
  get_active_display_at_index(hdcp, index);
+   enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

  dtm_cmd = (struct ta_dtm_shared_memory 
*)psp->dtm_context.dtm_shared_buf;

  if (!display || !is_display_added(display))
  return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;

+   mutex_lock(&psp->dtm_context.mutex);
+
  memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));

  dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -66,14 +69,15 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(

  psp_dtm_invoke(psp, dtm_cmd->cmd_id);

-   if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
-   return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+   if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+   status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+   } else {
+   display->state = MOD_HDCP_DISPLAY_ACTIVE;
+   HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+   }

-   display->state = MOD_HDCP_DISPLAY_ACTIVE;
-   HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
-
-   return MOD_HDCP_STATUS_SUCCESS;
-
+   mutex_unlock(&psp->dtm_context.mutex);
+   return status;
   }
   enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
    uint8_t index)
@@ -83,6 +87,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct 
mod_hdcp *hdcp,
  

Re: [PATCH] drm/amdkfd: kfree the wrong pointer

2020-04-01 Thread Nirmoy



On 4/1/20 2:12 PM, Jack Zhang wrote:

Originally, it kfrees the wrong pointer for mem_obj.
It would cause a memory leak under stress test.

Signed-off-by: Jack Zhang 



Acked-by: Nirmoy Das 



---
  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 ++--
  1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7866cd06..0491ab2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1134,9 +1134,9 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int 
size,
return 0;
  
  kfd_gtt_no_free_chunk:

-   pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
+   pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
-   kfree(mem_obj);
+   kfree(*mem_obj);
return -ENOMEM;
  }
  

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 5/5] drm/amdgpu: utilize subconnector property for DP through DisplayManager

2020-04-01 Thread Jeevan B
Since DP-specific information is stored in the driver's structures, every
driver needs to implement the subconnector property by itself. Display
Core already has the subconnector information; we only need to
expose it through a DRM property.

Signed-off-by: Oleg Vasilev 
Tested-by: Oleg Vasilev 
Cc: Alex Deucher 
Cc: Christian König 
Cc: David (ChunMing) Zhou 
Cc: amd-gfx@lists.freedesktop.org
Signed-off-by: Jeevan B 
Link: 
https://patchwork.freedesktop.org/patch/msgid/20190829114854.1539-7-oleg.vasi...@intel.com
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  | 41 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c|  3 ++
 2 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d3674d8..91c0ef2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -121,6 +121,42 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 static int amdgpu_dm_init(struct amdgpu_device *adev);
 static void amdgpu_dm_fini(struct amdgpu_device *adev);
 
+static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
+{
+   switch (link->dpcd_caps.dongle_type) {
+   case DISPLAY_DONGLE_NONE:
+   return DRM_MODE_SUBCONNECTOR_Native;
+   case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+   return DRM_MODE_SUBCONNECTOR_VGA;
+   case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+   case DISPLAY_DONGLE_DP_DVI_DONGLE:
+   return DRM_MODE_SUBCONNECTOR_DVID;
+   case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
+   case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+   return DRM_MODE_SUBCONNECTOR_HDMIA;
+   case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+   default:
+   return DRM_MODE_SUBCONNECTOR_Unknown;
+   }
+}
+
+static void update_subconnector_property(struct amdgpu_dm_connector 
*aconnector)
+{
+   struct dc_link *link = aconnector->dc_link;
+   struct drm_connector *connector = &aconnector->base;
+   enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+   if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+   return;
+
+   if (aconnector->dc_sink)
+   subconnector = get_subconnector_type(link);
+
+   drm_object_property_set_value(&connector->base,
+   connector->dev->mode_config.dp_subconnector_property,
+   subconnector);
+}
+
 /*
  * initializes drm_device display related structures, based on the information
  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
@@ -1917,7 +1953,6 @@ void amdgpu_dm_update_connector_after_detect(
if (aconnector->mst_mgr.mst_state == true)
return;
 
-
sink = aconnector->dc_link->local_sink;
if (sink)
dc_sink_retain(sink);
@@ -2038,6 +2073,8 @@ void amdgpu_dm_update_connector_after_detect(
 
	mutex_unlock(&dev->mode_config.mutex);
 
+   update_subconnector_property(aconnector);
+
if (sink)
dc_sink_release(sink);
 }
@@ -4518,6 +4555,8 @@ amdgpu_dm_connector_detect(struct drm_connector 
*connector, bool force)
else
connected = (aconnector->base.force == DRM_FORCE_ON);
 
+   update_subconnector_property(aconnector);
+
return (connected ? connector_status_connected :
connector_status_disconnected);
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index e8208df..dab81a6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -26,6 +26,7 @@
 #include 
 #include 
 #include 
+#include 
 #include "dm_services.h"
 #include "amdgpu.h"
 #include "amdgpu_dm.h"
@@ -485,6 +486,8 @@ void amdgpu_dm_initialize_dp_connector(struct 
amdgpu_display_manager *dm,
16,
4,
aconnector->connector_id);
+
+   drm_mode_add_dp_subconnector_property(&aconnector->base);
 }
 
 int dm_mst_get_pbn_divider(struct dc_link *link)
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdkfd: kfree the wrong pointer

2020-04-01 Thread Jack Zhang
Originally, it kfrees the wrong pointer for mem_obj.
It would cause a memory leak under stress test.
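
For context, mem_obj is an out parameter here, so the error path has to free
what was allocated into *mem_obj, not the caller's slot. Roughly (simplified
sketch, not the full function):

	int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
				struct kfd_mem_obj **mem_obj)
	{
		*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
		...
	kfd_gtt_no_free_chunk:
		/* kfree(mem_obj) would try to free the caller's variable
		 * and leak the object allocated into *mem_obj. */
		kfree(*mem_obj);
		return -ENOMEM;
	}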

Signed-off-by: Jack Zhang 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7866cd06..0491ab2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1134,9 +1134,9 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int 
size,
return 0;
 
 kfd_gtt_no_free_chunk:
-   pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
+   pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
-   kfree(mem_obj);
+   kfree(*mem_obj);
return -ENOMEM;
 }
 
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK

2020-04-01 Thread Yuxian Dai
1. Use the FCLK DPM table to set the MCLK. The DPM states consist of
three entities:
 FCLK
 UCLK
 MEMCLK
All three clocks change together, and MEMCLK is derived from FCLK, so use
the FCLK frequency.
2. Show the current working clock frequency from the clock table metrics.
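
Illustrative pp_dpm_mclk output after this change (frequencies invented),
when the current frequency does not match any table level:

0: 400Mhz
1: 800Mhz
2: 1200Mhz
   933Mhz *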

Signed-off-by: Yuxian Dai 
Reviewed-by: Alex Deucher 
Reviewed-by: Huang Rui 
Reviewed-by: Kevin Wang 
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 6 ++
 drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2 +-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..c6b39a7026a8 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
+   bool cur_value_match_level = false;
 
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
@@ -297,8 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
+   if (cur_value == value)
+   cur_value_match_level = true;
}
 
+   if (!cur_value_match_level)
+   size += sprintf(buf + size, "   %uMhz *\n", cur_value);
+
return size;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
freq = table->SocClocks[dpm_level].Freq;\
break;  \
case SMU_MCLK:  \
-   freq = table->MemClocks[dpm_level].Freq;\
+   freq = table->FClocks[dpm_level].Freq;  \
break;  \
case SMU_DCEFCLK:   \
freq = table->DcfClocks[dpm_level].Freq;\
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/6] dma-buf: add peer2peer flag

2020-04-01 Thread Daniel Vetter
On Mon, Mar 30, 2020 at 03:55:31PM +0200, Christian König wrote:
> Add a peer2peer flag noting that the importer can deal with device
> resources which are not backed by pages.
> 
> Signed-off-by: Christian König 
> ---
>  drivers/dma-buf/dma-buf.c |  2 ++
>  include/linux/dma-buf.h   | 10 ++
>  2 files changed, 12 insertions(+)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index ccc9eda1bc28..570c923023e6 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -690,6 +690,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct 
> device *dev,
>  
>   attach->dev = dev;
>   attach->dmabuf = dmabuf;
> + if (importer_ops)
> + attach->peer2peer = importer_ops->allow_peer2peer;

So an idea that crossed my mind to validate this, since we need quite an
amount of bad luck before someone accidentally introduces an access to
struct_page in sg lists in some slowpath.

On map_sg, if ->peer2peer is set, we could mangle the struct_page
pointers, e.g. swap high bits for low bits (so that NULL stays NULL). On
unmap_sg we obviously need to undo that, in case the exporter needs those
pointers for its own book-keeping for some reason. I was also pondering
just setting them all to NULL, but that might break some exporters. With
the pointer mangling trick (especially if we flip high for low bits on 64-bit,
where this should result in invalid addresses in almost all cases) we
should be able to catch buggy p2p importers quite quickly.

Thoughts? Maybe add as a follow-up patch for testing?
-Daniel
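
A rough sketch of that mangling (illustration only, helper name invented):

/* Swap the pointer halves so stray struct_page dereferences fault;
 * NULL stays NULL, and applying the same swap again on unmap_sg
 * restores the original value (assumes BITS_PER_LONG == 64). */
static struct page *mangle_sg_page(struct page *page)
{
	unsigned long v = (unsigned long)page;

	return (struct page *)((v << 32) | (v >> 32));
}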
>   attach->importer_ops = importer_ops;
>   attach->importer_priv = importer_priv;
>  
> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> index 1ade486fc2bb..82e0a4a64601 100644
> --- a/include/linux/dma-buf.h
> +++ b/include/linux/dma-buf.h
> @@ -334,6 +334,14 @@ struct dma_buf {
>   * Attachment operations implemented by the importer.
>   */
>  struct dma_buf_attach_ops {
> + /**
> +  * @allow_peer2peer:
> +  *
> +  * If this is set to true the importer must be able to handle peer
> +  * resources without struct pages.
> +  */
> + bool allow_peer2peer;
> +
>   /**
>* @move_notify
>*
> @@ -362,6 +370,7 @@ struct dma_buf_attach_ops {
>   * @node: list of dma_buf_attachment, protected by dma_resv lock of the 
> dmabuf.
>   * @sgt: cached mapping.
>   * @dir: direction of cached mapping.
> + * @peer2peer: true if the importer can handle peer resources without pages.
>   * @priv: exporter specific attachment data.
>   * @importer_ops: importer operations for this attachment, if provided
>   * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
> @@ -382,6 +391,7 @@ struct dma_buf_attachment {
>   struct list_head node;
>   struct sg_table *sgt;
>   enum dma_data_direction dir;
> + bool peer2peer;
>   const struct dma_buf_attach_ops *importer_ops;
>   void *importer_priv;
>   void *priv;
> -- 
> 2.17.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK

2020-04-01 Thread Yuxian Dai
From: "yuxia...@amd.com" 

1,Using the FCLK DPM table to set the MCLK for DPM states consist of
three entities:
 FCLK
 UCLK
 MEMCLK
All these three clk change together, MEMCLK from FCLK, so use the fclk
frequency.
2,we should show the current working clock freqency from clock table metric

Change-Id: Ia45f3069fc7ae56db495cb5a3865e2c50c550774
Signed-off-by: Yuxian Dai 
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 6 ++
 drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2 +-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..5adc25c8f6f4 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
+   bool cur_value_match_level = false;
 
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
@@ -297,7 +298,12 @@ static int renoir_print_clk_levels(struct smu_context *smu,
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
+   if (cur_value == value) 
+   cur_value_match_level = true;
}
+   
+   if (!cur_value_match_level)
+   size += sprintf(buf + size, "   %uMhz *\n",cur_value);
 
return size;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
freq = table->SocClocks[dpm_level].Freq;\
break;  \
case SMU_MCLK:  \
-   freq = table->MemClocks[dpm_level].Freq;\
+   freq = table->FClocks[dpm_level].Freq;  \
break;  \
case SMU_DCEFCLK:   \
freq = table->DcfClocks[dpm_level].Freq;\
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK

2020-04-01 Thread Wang, Kevin(Yang)
[AMD Official Use Only - Internal Distribution Only]



From: amd-gfx  on behalf of Yuxian Dai 

Sent: Wednesday, April 1, 2020 3:14 PM
To: amd-gfx@lists.freedesktop.org 
Cc: Dai, Yuxian (David) ; Dai, Yuxian (David) 

Subject: [PATCH] drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK

From: "yuxia...@amd.com" 

1,Using the FCLK DPM table to set the MCLK for DPM states consist of
three entities:
 FCLK
 UCLK
 MEMCLK
All these three clk change together, MEMCLK from FCLK, so use the fclk
frequency.
2,we should show the current working clock freqency from clock table metric

Change-Id: Ia45f3069fc7ae56db495cb5a3865e2c50c550774
Signed-off-by: Yuxian Dai 
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 7 +++
 drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..3901b20196d7 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 SmuMetrics_t metrics;
+   bool cur_value_match_level = false;

 if (!clk_table || clk_type >= SMU_CLK_COUNT)
 return -EINVAL;
@@ -297,7 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
 size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
 cur_value == value ? "*" : "");
+   if (cur_value == value)
+   cur_value_match_level = true;
 }
+
+   if (!cur_value_match_level)
+   size += sprintf(buf + size, "   %uMhz *\n",cur_value);
+
[kevin]:
after removing this unnecessary blank line,
Reviewed-by: Kevin Wang 

 return size;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
 freq = table->SocClocks[dpm_level].Freq;\
 break;  \
 case SMU_MCLK:  \
-   freq = table->MemClocks[dpm_level].Freq;\
+   freq = table->FClocks[dpm_level].Freq;  \
 break;  \
 case SMU_DCEFCLK:   \
 freq = table->DcfClocks[dpm_level].Freq;\
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK

2020-04-01 Thread Yuxian Dai
From: "yuxia...@amd.com" 

1,Using the FCLK DPM table to set the MCLK for DPM states consist of
three entities:
 FCLK
 UCLK
 MEMCLK
All these three clk change together, MEMCLK from FCLK, so use the fclk
frequency.
2,we should show the current working clock freqency from clock table metric

Change-Id: Ia45f3069fc7ae56db495cb5a3865e2c50c550774
Signed-off-by: Yuxian Dai 
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 7 +++
 drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..3901b20196d7 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
+   bool cur_value_match_level = false;
 
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
@@ -297,7 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
+   if (cur_value == value) 
+   cur_value_match_level = true;
}
+   
+   if (!cur_value_match_level)
+   size += sprintf(buf + size, "   %uMhz *\n",cur_value);
+
 
return size;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
freq = table->SocClocks[dpm_level].Freq;\
break;  \
case SMU_MCLK:  \
-   freq = table->MemClocks[dpm_level].Freq;\
+   freq = table->FClocks[dpm_level].Freq;  \
break;  \
case SMU_DCEFCLK:   \
freq = table->DcfClocks[dpm_level].Freq;\
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK

2020-04-01 Thread Dai, Yuxian (David)
[AMD Official Use Only - Internal Distribution Only]

1,Using the FCLK DPM table to set the MCLK for DPM states consist of
three entities:
 FCLK
 UCLK
 MEMCLK
All these three clk change together, MEMCLK from FCLK, so use the fclk
frequency.
2,we should show the current working clock freqency from clock table metric

Change-Id: Ia45f3069fc7ae56db495cb5a3865e2c50c550774
Signed-off-by: Yuxian Dai 
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 8 
 drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..5c5d3f974532 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 SmuMetrics_t metrics;
+   bool cur_value_match_level = false;

 if (!clk_table || clk_type >= SMU_CLK_COUNT)
 return -EINVAL;
@@ -297,6 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
 size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
 cur_value == value ? "*" : "");
+   if(cur_value == value) {
+   cur_value_match_level = true;
+   }
+   }
+
+   if(!cur_value_match_level) {
+   size += sprintf(buf + size, "   %uMhz *\n",cur_value);
 }
[kevin]:
I have a little suggestion about coding style.
"Do not unnecessarily use braces where a single statement will do."
we'd better fix it.
thanks.

  *   Ok, I will remove it.

https://www.kernel.org/doc/html/latest/process/coding-style.html

 return size;
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
 freq = table->SocClocks[dpm_level].Freq;\
 break;  \
 case SMU_MCLK:  \
-   freq = table->MemClocks[dpm_level].Freq;\
+   freq = table->FClocks[dpm_level].Freq;  \
 break;  \
 case SMU_DCEFCLK:   \
 freq = table->DcfClocks[dpm_level].Freq;\
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK

2020-04-01 Thread Yuxian Dai
From: "yuxia...@amd.com" 

1,Using the FCLK DPM table to set the MCLK for DPM states consist of
three entities:
 FCLK
 UCLK
 MEMCLK
All these three clk change together, MEMCLK from FCLK, so use the fclk
frequency.
2,we should show the current working clock freqency from clock table metric

Change-Id: Ia45f3069fc7ae56db495cb5a3865e2c50c550774
Signed-off-by: Yuxian Dai 
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 7 +++
 drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..30240fdff840 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
+   bool cur_value_match_level = false;
 
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
@@ -297,7 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
+   if(cur_value == value) 
+   cur_value_match_level = true;
}
+   
+   if(!cur_value_match_level)
+   size += sprintf(buf + size, "   %uMhz *\n",cur_value);
+
 
return size;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
freq = table->SocClocks[dpm_level].Freq;\
break;  \
case SMU_MCLK:  \
-   freq = table->MemClocks[dpm_level].Freq;\
+   freq = table->FClocks[dpm_level].Freq;  \
break;  \
case SMU_DCEFCLK:   \
freq = table->DcfClocks[dpm_level].Freq;\
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK

2020-04-01 Thread Wang, Kevin(Yang)
[AMD Official Use Only - Internal Distribution Only]



From: amd-gfx  on behalf of Yuxian Dai 

Sent: Wednesday, April 1, 2020 2:02 PM
To: amd-gfx@lists.freedesktop.org 
Cc: Dai, Yuxian (David) ; Dai, Yuxian (David) 

Subject: [PATCH] drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK

From: "yuxia...@amd.com" 

1,Using the FCLK DPM table to set the MCLK for DPM states consist of
three entities:
 FCLK
 UCLK
 MEMCLK
All these three clk change together, MEMCLK from FCLK, so use the fclk
frequency.
2,we should show the current working clock freqency from clock table metric

Change-Id: Ia45f3069fc7ae56db495cb5a3865e2c50c550774
Signed-off-by: Yuxian Dai 
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 8 
 drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..5c5d3f974532 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 SmuMetrics_t metrics;
+   bool cur_value_match_level = false;

 if (!clk_table || clk_type >= SMU_CLK_COUNT)
 return -EINVAL;
@@ -297,6 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
 size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
 cur_value == value ? "*" : "");
+   if(cur_value == value) {
+   cur_value_match_level = true;
+   }
+   }
+
+   if(!cur_value_match_level) {
+   size += sprintf(buf + size, "   %uMhz *\n",cur_value);
 }
[kevin]:
I have a little suggestion about coding style.
"Do not unnecessarily use braces where a single statement will do."
we'd better fix it.
thanks.

https://www.kernel.org/doc/html/latest/process/coding-style.html


 return size;
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
 freq = table->SocClocks[dpm_level].Freq;\
 break;  \
 case SMU_MCLK:  \
-   freq = table->MemClocks[dpm_level].Freq;\
+   freq = table->FClocks[dpm_level].Freq;  \
 break;  \
 case SMU_DCEFCLK:   \
 freq = table->DcfClocks[dpm_level].Freq;\
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK

2020-04-01 Thread Yuxian Dai
From: "yuxia...@amd.com" 

1,Using the FCLK DPM table to set the MCLK for DPM states consist of
three entities:
 FCLK
 UCLK
 MEMCLK
All these three clk change together, MEMCLK from FCLK, so use the fclk
frequency.
2,we should show the current working clock freqency from clock table metric

Change-Id: Ia45f3069fc7ae56db495cb5a3865e2c50c550774
Signed-off-by: Yuxian Dai 
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 8 
 drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..5c5d3f974532 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
+   bool cur_value_match_level = false;
 
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
@@ -297,6 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
+   if(cur_value == value) {
+   cur_value_match_level = true;
+   }
+   }
+   
+   if(!cur_value_match_level) {
+   size += sprintf(buf + size, "   %uMhz *\n",cur_value);
}
 
return size;
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
freq = table->SocClocks[dpm_level].Freq;\
break;  \
case SMU_MCLK:  \
-   freq = table->MemClocks[dpm_level].Freq;\
+   freq = table->FClocks[dpm_level].Freq;  \
break;  \
case SMU_DCEFCLK:   \
freq = table->DcfClocks[dpm_level].Freq;\
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx