RE: [PATCH] drm/amd/powerplay: update Arcturus driver smu interface XGMI link part

2019-10-17 Thread Feng, Kenneth
Reviewed-by: Kenneth Feng 


-Original Message-
From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Quan, 
Evan
Sent: Friday, October 18, 2019 1:49 PM
To: amd-gfx@lists.freedesktop.org
Cc: Quan, Evan 
Subject: [PATCH] drm/amd/powerplay: update Arcturus driver smu interface XGMI 
link part

To fit the latest SMU firmware.

Change-Id: Ie34e6930577b7a6fe993273f213732696628b264
Signed-off-by: Evan Quan 
---
 .../powerplay/inc/smu11_driver_if_arcturus.h  | 28 +--
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 +-
 2 files changed, 21 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index 2248d682c462..886b9a21ebd8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -423,18 +423,30 @@ typedef enum {
 } PwrConfig_e;

 typedef enum {
-  XGMI_LINK_RATE_12 = 0,  // 12Gbps
-  XGMI_LINK_RATE_16,  // 16Gbps
-  XGMI_LINK_RATE_22,  // 22Gbps
-  XGMI_LINK_RATE_25,  // 25Gbps
+  XGMI_LINK_RATE_2 = 2,// 2Gbps
+  XGMI_LINK_RATE_4 = 4,// 4Gbps
+  XGMI_LINK_RATE_8 = 8,// 8Gbps
+  XGMI_LINK_RATE_12 = 12,  // 12Gbps
+  XGMI_LINK_RATE_16 = 16,  // 16Gbps
+  XGMI_LINK_RATE_17 = 17,  // 17Gbps
+  XGMI_LINK_RATE_18 = 18,  // 18Gbps
+  XGMI_LINK_RATE_19 = 19,  // 19Gbps
+  XGMI_LINK_RATE_20 = 20,  // 20Gbps
+  XGMI_LINK_RATE_21 = 21,  // 21Gbps
+  XGMI_LINK_RATE_22 = 22,  // 22Gbps
+  XGMI_LINK_RATE_23 = 23,  // 23Gbps
+  XGMI_LINK_RATE_24 = 24,  // 24Gbps
+  XGMI_LINK_RATE_25 = 25,  // 25Gbps
   XGMI_LINK_RATE_COUNT
 } XGMI_LINK_RATE_e;

 typedef enum {
-  XGMI_LINK_WIDTH_2 = 0, // x2
-  XGMI_LINK_WIDTH_4, // x4
-  XGMI_LINK_WIDTH_8, // x8
-  XGMI_LINK_WIDTH_16,// x16
+  XGMI_LINK_WIDTH_1 = 1,   // x1
+  XGMI_LINK_WIDTH_2 = 2,   // x2
+  XGMI_LINK_WIDTH_4 = 4,   // x4
+  XGMI_LINK_WIDTH_8 = 8,   // x8
+  XGMI_LINK_WIDTH_9 = 9,   // x9
+  XGMI_LINK_WIDTH_16 = 16, // x16
   XGMI_LINK_WIDTH_COUNT
 } XGMI_LINK_WIDTH_e;

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 46214f53386d..5b18a066e644 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,7 +27,7 @@

 #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x0D
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x0F
 #define SMU11_DRIVER_IF_VERSION_NV10 0x33
 #define SMU11_DRIVER_IF_VERSION_NV12 0x33
 #define SMU11_DRIVER_IF_VERSION_NV14 0x34
--
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/amd/powerplay: update Arcturus driver smu interface XGMI link part

2019-10-17 Thread Quan, Evan
To fit the latest SMU firmware.

Change-Id: Ie34e6930577b7a6fe993273f213732696628b264
Signed-off-by: Evan Quan 
---
 .../powerplay/inc/smu11_driver_if_arcturus.h  | 28 +--
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 +-
 2 files changed, 21 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index 2248d682c462..886b9a21ebd8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -423,18 +423,30 @@ typedef enum {
 } PwrConfig_e;
 
 typedef enum {
-  XGMI_LINK_RATE_12 = 0,  // 12Gbps
-  XGMI_LINK_RATE_16,  // 16Gbps
-  XGMI_LINK_RATE_22,  // 22Gbps
-  XGMI_LINK_RATE_25,  // 25Gbps
+  XGMI_LINK_RATE_2 = 2,// 2Gbps
+  XGMI_LINK_RATE_4 = 4,// 4Gbps
+  XGMI_LINK_RATE_8 = 8,// 8Gbps
+  XGMI_LINK_RATE_12 = 12,  // 12Gbps
+  XGMI_LINK_RATE_16 = 16,  // 16Gbps
+  XGMI_LINK_RATE_17 = 17,  // 17Gbps
+  XGMI_LINK_RATE_18 = 18,  // 18Gbps
+  XGMI_LINK_RATE_19 = 19,  // 19Gbps
+  XGMI_LINK_RATE_20 = 20,  // 20Gbps
+  XGMI_LINK_RATE_21 = 21,  // 21Gbps
+  XGMI_LINK_RATE_22 = 22,  // 22Gbps
+  XGMI_LINK_RATE_23 = 23,  // 23Gbps
+  XGMI_LINK_RATE_24 = 24,  // 24Gbps
+  XGMI_LINK_RATE_25 = 25,  // 25Gbps
   XGMI_LINK_RATE_COUNT
 } XGMI_LINK_RATE_e;
 
 typedef enum {
-  XGMI_LINK_WIDTH_2 = 0, // x2
-  XGMI_LINK_WIDTH_4, // x4
-  XGMI_LINK_WIDTH_8, // x8
-  XGMI_LINK_WIDTH_16,// x16
+  XGMI_LINK_WIDTH_1 = 1,   // x1
+  XGMI_LINK_WIDTH_2 = 2,   // x2
+  XGMI_LINK_WIDTH_4 = 4,   // x4
+  XGMI_LINK_WIDTH_8 = 8,   // x8
+  XGMI_LINK_WIDTH_9 = 9,   // x9
+  XGMI_LINK_WIDTH_16 = 16, // x16
   XGMI_LINK_WIDTH_COUNT
 } XGMI_LINK_WIDTH_e;
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 46214f53386d..5b18a066e644 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,7 +27,7 @@
 
 #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x0D
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x0F
 #define SMU11_DRIVER_IF_VERSION_NV10 0x33
 #define SMU11_DRIVER_IF_VERSION_NV12 0x33
 #define SMU11_DRIVER_IF_VERSION_NV14 0x34
-- 
2.23.0

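A side effect of the new numbering worth noting: each enumerator's value now
equals its rate in Gbps (and each width value its lane count), so a consumer
no longer needs a lookup table. A minimal sketch (illustrative, not part of
the patch):

   /* with the new encoding the enumerator value is the rate in Gbps */
   static inline int xgmi_link_rate_to_gbps(XGMI_LINK_RATE_e rate)
   {
           return (int)rate; /* e.g. XGMI_LINK_RATE_22 -> 22 */
   }
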
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amd/powerplay: add lock protection for swSMU APIs

2019-10-17 Thread Quan, Evan


-Original Message-
From: Grodzovsky, Andrey  
Sent: Thursday, October 17, 2019 10:22 PM
To: Quan, Evan ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amd/powerplay: add lock protection for swSMU APIs


On 10/16/19 11:55 PM, Quan, Evan wrote:
> This is a quick and low-risk fix. Those APIs which
> are exposed to other IPs, or which support the sysfs/hwmon
> interfaces or DAL, will have lock protection. Meanwhile
> no lock protection is enforced for internally used swSMU
> APIs. Future optimization is needed.


Does it mean that there is still a risk of collision on SMU access between
an external API function and an internal one?

[Quan, Evan] It should not. Neither the SMU nor other IPs should access those
internal APIs directly after SMU IP setup completes (after late_init).
Access should always go through the external APIs. In fact I ran a compute
stress test overnight with 10 terminals accessing the amdgpu_pm_info sysfs at
the same time and did not see any problem. So, the implementation should be
safe.
The "optimization" mentioned here is about code style and readability.
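To illustrate the split being described, a minimal sketch of the pattern
(illustrative names; assumes the mutex the patch embeds in smu_context):

   /* External API: exposed to other IPs / sysfs / hwmon / DAL. */
   int smu_do_operation(struct smu_context *smu)
   {
           int ret;

           mutex_lock(&smu->mutex);
           ret = smu_do_operation_locked(smu); /* internal, assumes lock held */
           mutex_unlock(&smu->mutex);

           return ret;
   }

Internal swSMU code that already runs under the lock calls
smu_do_operation_locked() directly, which is why the internal APIs need no
protection of their own once setup is complete.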

Andrey


>
> Change-Id: I8392652c9da1574a85acd9b171f04380f3630852
> Signed-off-by: Evan Quan 
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c   |   6 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h   |   6 -
>   drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c|  23 +-
>   .../amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c  |   4 +-
>   drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 684 --
>   .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h| 163 +++--
>   drivers/gpu/drm/amd/powerplay/navi10_ppt.c|  15 +-
>   drivers/gpu/drm/amd/powerplay/renoir_ppt.c|  12 +-
>   drivers/gpu/drm/amd/powerplay/smu_v11_0.c |   7 +-
>   drivers/gpu/drm/amd/powerplay/vega20_ppt.c|   6 +-
>   10 files changed, 773 insertions(+), 153 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
> index 263265245e19..28d32725285b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
> @@ -912,7 +912,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool 
> low)
>   if (is_support_sw_smu(adev)) {
>   ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
>low ? &clk_freq : NULL,
> -  !low ? &clk_freq : NULL);
> +  !low ? &clk_freq : NULL,
> +  true);
>   if (ret)
>   return 0;
>   return clk_freq * 100;
> @@ -930,7 +931,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool 
> low)
>   if (is_support_sw_smu(adev)) {
>   ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
>low ? &clk_freq : NULL,
> -  !low ? &clk_freq : NULL);
> +  !low ? &clk_freq : NULL,
> +  true);
>   if (ret)
>   return 0;
>   return clk_freq * 100;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
> index 1c5c0fd76dbf..2cfb677272af 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
> @@ -298,12 +298,6 @@ enum amdgpu_pcie_gen {
>   #define amdgpu_dpm_get_current_power_state(adev) \
>   
> ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
>   
> -#define amdgpu_smu_get_current_power_state(adev) \
> - ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
> -
> -#define amdgpu_smu_set_power_state(adev) \
> - ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
> -
>   #define amdgpu_dpm_get_pp_num_states(adev, data) \
>   
> ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, 
> data))
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index c50d5f1e75e5..36f36b35000d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -211,7 +211,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
>   
>   if (is_support_sw_smu(adev)) {
>   if (adev->smu.ppt_funcs->get_current_power_state)
> - pm = amdgpu_smu_get_current_power_state(adev);
> + pm = smu_get_current_power_state(&adev->smu);
>   else
>   pm = adev->pm.dpm.user_state;
>   } else if (adev->powerplay.pp_funcs->get_current_power_state) {
> @@ -957,7 +957,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
>   return ret;
>   
>   if (is_support_sw_smu(adev))
> - ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
> + ret = 

radeon backtrace on fedora 31

2019-10-17 Thread Dave Airlie
5.3.4-300.fc31.x86_64

seems to be new.

https://retrace.fedoraproject.org/faf/reports/2726149/


Dave.
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amdgpu/powerplay: use local renoir array sizes for clock fetching

2019-10-17 Thread Liang, Prike
Reviewed-by: Prike Liang 

> -Original Message-
> From: amd-gfx  On Behalf Of Alex
> Deucher
> Sent: Friday, October 18, 2019 12:00 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander 
> Subject: [PATCH] drm/amdgpu/powerplay: use local renoir array sizes for
> clock fetching
> 
> To avoid walking past the end of the arrays since the PP_SMU defines don't
> match the renoir defines.
> 
> Signed-off-by: Alex Deucher 
> ---
>  drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 8 
>  1 file changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> index fa314c275a82..f0c8d1ad2a80 100644
> --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> @@ -427,22 +427,22 @@ static int renoir_get_dpm_clock_table(struct
> smu_context *smu, struct dpm_clocks
>   if (!clock_table || !table)
>   return -EINVAL;
> 
> - for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
> + for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
>   clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
>   clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
>   }
> 
> - for (i = 0; i < PP_SMU_NUM_SOCCLK_DPM_LEVELS; i++) {
> + for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
>   clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
>   clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
>   }
> 
> - for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) {
> + for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
>   clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
>   clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
>   }
> 
> - for (i = 0; i < PP_SMU_NUM_MEMCLK_DPM_LEVELS; i++) {
> + for (i = 0; i < NUM_MEMCLK_DPM_LEVELS; i++) {
>   clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
>   clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
>   }
> --
> 2.23.0
> 
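For context, a hedged sketch of the failure mode being avoided: if the loop
bound comes from one family of defines while the destination array is sized
by the other, the copy overruns as soon as the two drift apart. Clamping to
both would also be safe (illustrative alternative, not the actual fix):

   /* illustrative: bound the copy by both define families */
   for (i = 0; i < min(NUM_DCFCLK_DPM_LEVELS, PP_SMU_NUM_DCFCLK_DPM_LEVELS); i++) {
           clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
           clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
   }
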
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amdgpu: disable c-states on xgmi perfmons

2019-10-17 Thread Zeng, Oak
Is it by design that we have to disable c-states before reading/writing DF
registers, or is this only a workaround? In the latter case we need to work
with the DF team to figure out the root cause of the hang.

Regards,
Oak

-Original Message-
From: amd-gfx  On Behalf Of Kim, Jonathan
Sent: Wednesday, October 16, 2019 8:50 PM
To: amd-gfx@lists.freedesktop.org
Cc: Kuehling, Felix ; Quan, Evan 
Subject: RE: [PATCH] drm/amdgpu: disable c-states on xgmi perfmons

+ Felix

-Original Message-
From: Kim, Jonathan  
Sent: Wednesday, October 16, 2019 8:49 PM
To: amd-gfx@lists.freedesktop.org
Cc: felix.keuhl...@amd.com; Quan, Evan ; Kim, Jonathan 
; Kim, Jonathan 
Subject: [PATCH] drm/amdgpu: disable c-states on xgmi perfmons

Reads or writes to DF registers while the GPU DF is in c-states will result
in a hang. DF c-states should be disabled prior to reads or writes, and then
re-enabled afterwards.

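A condensed sketch of the access pattern the patch enforces (hypothetical
helper for illustration; the patch itself modifies the existing fica/perfmon
accessors rather than adding this function):

   static void df_indirect_write(struct amdgpu_device *adev,
                                 uint32_t addr, uint32_t val)
   {
           unsigned long flags;

           if (smu_set_df_cstate(&adev->smu, 0))  /* disallow DF c-states */
                   return;

           spin_lock_irqsave(&adev->pcie_idx_lock, flags);
           WREG32(addr, val);                     /* DF is guaranteed awake */
           spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

           smu_set_df_cstate(&adev->smu, 1);      /* re-allow DF c-states */
   }
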
Change-Id: I6d5a83e4fe13e29c73dfb03a94fe7c611e867fec
Signed-off-by: Jonathan Kim 
---
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 21 -
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c 
b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 16fbd2bc8ad1..9a58416662e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -102,6 +102,9 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
+   if (smu_set_df_cstate(&adev->smu, 0))
+   return 0xFFFFFFFFFFFFFFFF;
+
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
WREG32(data, ficaa_val);
@@ -114,6 +117,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
 
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 
+   smu_set_df_cstate(&adev->smu, 1);
+
return (((ficadh_val & 0xFFFFFFFFFFFFFFFF) << 32) | ficadl_val);
}
 
@@ -125,6 +130,9 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, 
uint32_t ficaa_val,
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
+   if (smu_set_df_cstate(&adev->smu, 0))
+   return;
+
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
WREG32(data, ficaa_val);
@@ -134,8 +142,9 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, 
uint32_t ficaa_val,
 
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
WREG32(data, ficadh_val);
-
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+   smu_set_df_cstate(&adev->smu, 1);
 }
 
 /*
@@ -153,12 +162,17 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device 
*adev,
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
+   if (smu_set_df_cstate(&adev->smu, 0))
+   return;
+
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
*lo_val = RREG32(data);
WREG32(address, hi_addr);
*hi_val = RREG32(data);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+   smu_set_df_cstate(&adev->smu, 1);
 }
 
 /*
@@ -175,12 +189,17 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device 
*adev, uint32_t lo_addr,
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
+   if (smu_set_df_cstate(&adev->smu, 0))
+   return;
+
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
WREG32(data, lo_val);
WREG32(address, hi_addr);
WREG32(data, hi_val);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+   smu_set_df_cstate(&adev->smu, 1);
 }
 
 /* get the number of df counters available */
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: Stack out of bounds in KFD on Arcturus

2019-10-17 Thread Grodzovsky, Andrey
Not that I'm aware of. Is there a special Kconfig flag that determines stack
size?

Andrey

On 10/17/19 5:29 PM, Kuehling, Felix wrote:
> I don't see why this problem would be specific to Arcturus. I don't see
> any excessive allocations on the stack either. Also the code involved
> here hasn't changed recently.
>
> Are you using some weird kernel config with a smaller stack? Is it
> specific to a compiler version or some optimization flags? I've
> sometimes seen function inlining cause excessive stack usage.
>
> Regards,
>     Felix
>
> On 2019-10-17 4:09 p.m., Grodzovsky, Andrey wrote:
>> Hey Felix - I see this on boot when working with Arcturus.
>>
>> Andrey
>>
>>
>> [  103.602092] kfd kfd: Allocated 3969056 bytes on gart
>> [  103.610769]
>> ==
>> [  103.611469] BUG: KASAN: stack-out-of-bounds in
>> kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
>> [  103.611646] Read of size 4 at addr 8883cb19ee38 by task modprobe/1122
>>
>> [  103.611836] CPU: 3 PID: 1122 Comm: modprobe Tainted: G
>> O  5.3.0-rc3+ #45
>> [  103.611847] Hardware name: System manufacturer System Product
>> Name/Z170-PRO, BIOS 1902 06/27/2016
>> [  103.611856] Call Trace:
>> [  103.611879]  dump_stack+0x71/0xab
>> [  103.611907]  print_address_description+0x1da/0x3c0
>> [  103.612453]  ? kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
>> [  103.612479]  __kasan_report+0x13f/0x1a0
>> [  103.613022]  ? kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
>> [  103.613580]  ? kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
>> [  103.613604]  kasan_report+0xe/0x20
>> [  103.614149]  kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
>> [  103.614762]  ? kfd_fill_gpu_memory_affinity+0x110/0x110 [amdgpu]
>> [  103.614796]  ? __alloc_pages_nodemask+0x2c9/0x560
>> [  103.614824]  ? __alloc_pages_slowpath+0x1390/0x1390
>> [  103.614898]  ? kmalloc_order+0x63/0x70
>> [  103.615469]  kfd_create_crat_image_virtual+0x70c/0x770 [amdgpu]
>> [  103.616054]  ? kfd_create_crat_image_acpi+0x1c0/0x1c0 [amdgpu]
>> [  103.616095]  ? up_write+0x4b/0x70
>> [  103.616649]  kfd_topology_add_device+0x98d/0xb10 [amdgpu]
>> [  103.617207]  ? kfd_topology_shutdown+0x60/0x60 [amdgpu]
>> [  103.617743]  ? start_cpsch+0x2ff/0x3a0 [amdgpu]
>> [  103.61]  ? mutex_lock_io_nested+0xac0/0xac0
>> [  103.617807]  ? __mutex_unlock_slowpath+0xda/0x420
>> [  103.617848]  ? __mutex_unlock_slowpath+0xda/0x420
>> [  103.617877]  ? wait_for_completion+0x200/0x200
>> [  103.618461]  ? start_cpsch+0x38b/0x3a0 [amdgpu]
>> [  103.619011]  ? create_queue_cpsch+0x670/0x670 [amdgpu]
>> [  103.619573]  ? kfd_iommu_device_init+0x92/0x1e0 [amdgpu]
>> [  103.620112]  ? kfd_iommu_resume+0x2c/0x2c0 [amdgpu]
>> [  103.620655]  ? kfd_iommu_check_device+0xf0/0xf0 [amdgpu]
>> [  103.621228]  kgd2kfd_device_init+0x474/0x870 [amdgpu]
>> [  103.621781]  amdgpu_amdkfd_device_init+0x291/0x390 [amdgpu]
>> [  103.622329]  ? amdgpu_amdkfd_device_probe+0x90/0x90 [amdgpu]
>> [  103.622344]  ? kmsg_dump_rewind_nolock+0x59/0x59
>> [  103.622895]  ? amdgpu_ras_eeprom_test+0x71/0x90 [amdgpu]
>> [  103.623424]  amdgpu_device_init+0x1bbe/0x2f00 [amdgpu]
>> [  103.623819]  ? amdgpu_device_has_dc_support+0x30/0x30 [amdgpu]
>> [  103.623842]  ? __isolate_free_page+0x290/0x290
>> [  103.623852]  ? fs_reclaim_acquire.part.97+0x5/0x30
>> [  103.623891]  ? __alloc_pages_nodemask+0x2c9/0x560
>> [  103.623912]  ? __alloc_pages_slowpath+0x1390/0x1390
>> [  103.623945]  ? kasan_unpoison_shadow+0x31/0x40
>> [  103.623970]  ? kmalloc_order+0x63/0x70
>> [  103.624337]  amdgpu_driver_load_kms+0xd9/0x430 [amdgpu]
>> [  103.624690]  ? amdgpu_register_gpu_instance+0xe0/0xe0 [amdgpu]
>> [  103.624756]  ? drm_dev_register+0x19c/0x310 [drm]
>> [  103.624768]  ? __kasan_slab_free+0x133/0x160
>> [  103.624849]  drm_dev_register+0x1f5/0x310 [drm]
>> [  103.625212]  amdgpu_pci_probe+0x109/0x1f0 [amdgpu]
>> [  103.625565]  ? amdgpu_pmops_runtime_idle+0xe0/0xe0 [amdgpu]
>> [  103.625580]  local_pci_probe+0x74/0xd0
>> [  103.625603]  pci_device_probe+0x1fa/0x310
>> [  103.625620]  ? pci_device_remove+0x1c0/0x1c0
>> [  103.625640]  ? sysfs_do_create_link_sd.isra.2+0x74/0xe0
>> [  103.625673]  really_probe+0x367/0x5d0
>> [  103.625700]  driver_probe_device+0x177/0x1b0
>> [  103.625721]  device_driver_attach+0x8a/0x90
>> [  103.625737]  ? device_driver_attach+0x90/0x90
>> [  103.625746]  __driver_attach+0xeb/0x190
>> [  103.625765]  ? device_driver_attach+0x90/0x90
>> [  103.625773]  bus_for_each_dev+0xe4/0x160
>> [  103.625789]  ? subsys_dev_iter_exit+0x10/0x10
>> [  103.625829]  bus_add_driver+0x277/0x330
>> [  103.625855]  driver_register+0xc6/0x1a0
>> [  103.625866]  ? 0xa0d88000
>> [  103.625880]  do_one_initcall+0xd3/0x334
>> [  103.625895]  ? trace_event_raw_event_initcall_finish+0x150/0x150
>> [  103.625911]  ? kasan_unpoison_shadow+0x31/0x40
>> [  103.625924]  ? __kasan_kmalloc+0xd5/0xf0
>> [  103.625946]  ? kmem_cache_alloc_trace+0x154/0x300
>> [  103.625955]  ? 

Re: Stack out of bounds in KFD on Arcturus

2019-10-17 Thread Kuehling, Felix
I don't see why this problem would be specific to Arcturus. I don't see 
any excessive allocations on the stack either. Also the code involved 
here hasn't changed recently.

Are you using some weird kernel config with a smaller stack? Is it 
specific to a compiler version or some optimization flags? I've 
sometimes seen function inlining cause excessive stack usage.

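For reference, a few knobs that are commonly involved here: on x86-64 the
kernel stack is 16KB by default, KASAN inflates stack frames considerably
with its redzones, and CONFIG_FRAME_WARN makes the compiler warn when a
single frame exceeds the configured size. Per-function stack usage can be
listed with:

   make checkstack

which runs scripts/checkstack.pl over the disassembled kernel.
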
Regards,
   Felix

On 2019-10-17 4:09 p.m., Grodzovsky, Andrey wrote:
> Hey Felix - I see this on boot when working with Arcturus.
>
> Andrey
>
>
> [  103.602092] kfd kfd: Allocated 3969056 bytes on gart
> [  103.610769]
> ==
> [  103.611469] BUG: KASAN: stack-out-of-bounds in
> kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
> [  103.611646] Read of size 4 at addr 8883cb19ee38 by task modprobe/1122
>
> [  103.611836] CPU: 3 PID: 1122 Comm: modprobe Tainted: G
> O  5.3.0-rc3+ #45
> [  103.611847] Hardware name: System manufacturer System Product
> Name/Z170-PRO, BIOS 1902 06/27/2016
> [  103.611856] Call Trace:
> [  103.611879]  dump_stack+0x71/0xab
> [  103.611907]  print_address_description+0x1da/0x3c0
> [  103.612453]  ? kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
> [  103.612479]  __kasan_report+0x13f/0x1a0
> [  103.613022]  ? kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
> [  103.613580]  ? kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
> [  103.613604]  kasan_report+0xe/0x20
> [  103.614149]  kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
> [  103.614762]  ? kfd_fill_gpu_memory_affinity+0x110/0x110 [amdgpu]
> [  103.614796]  ? __alloc_pages_nodemask+0x2c9/0x560
> [  103.614824]  ? __alloc_pages_slowpath+0x1390/0x1390
> [  103.614898]  ? kmalloc_order+0x63/0x70
> [  103.615469]  kfd_create_crat_image_virtual+0x70c/0x770 [amdgpu]
> [  103.616054]  ? kfd_create_crat_image_acpi+0x1c0/0x1c0 [amdgpu]
> [  103.616095]  ? up_write+0x4b/0x70
> [  103.616649]  kfd_topology_add_device+0x98d/0xb10 [amdgpu]
> [  103.617207]  ? kfd_topology_shutdown+0x60/0x60 [amdgpu]
> [  103.617743]  ? start_cpsch+0x2ff/0x3a0 [amdgpu]
> [  103.61]  ? mutex_lock_io_nested+0xac0/0xac0
> [  103.617807]  ? __mutex_unlock_slowpath+0xda/0x420
> [  103.617848]  ? __mutex_unlock_slowpath+0xda/0x420
> [  103.617877]  ? wait_for_completion+0x200/0x200
> [  103.618461]  ? start_cpsch+0x38b/0x3a0 [amdgpu]
> [  103.619011]  ? create_queue_cpsch+0x670/0x670 [amdgpu]
> [  103.619573]  ? kfd_iommu_device_init+0x92/0x1e0 [amdgpu]
> [  103.620112]  ? kfd_iommu_resume+0x2c/0x2c0 [amdgpu]
> [  103.620655]  ? kfd_iommu_check_device+0xf0/0xf0 [amdgpu]
> [  103.621228]  kgd2kfd_device_init+0x474/0x870 [amdgpu]
> [  103.621781]  amdgpu_amdkfd_device_init+0x291/0x390 [amdgpu]
> [  103.622329]  ? amdgpu_amdkfd_device_probe+0x90/0x90 [amdgpu]
> [  103.622344]  ? kmsg_dump_rewind_nolock+0x59/0x59
> [  103.622895]  ? amdgpu_ras_eeprom_test+0x71/0x90 [amdgpu]
> [  103.623424]  amdgpu_device_init+0x1bbe/0x2f00 [amdgpu]
> [  103.623819]  ? amdgpu_device_has_dc_support+0x30/0x30 [amdgpu]
> [  103.623842]  ? __isolate_free_page+0x290/0x290
> [  103.623852]  ? fs_reclaim_acquire.part.97+0x5/0x30
> [  103.623891]  ? __alloc_pages_nodemask+0x2c9/0x560
> [  103.623912]  ? __alloc_pages_slowpath+0x1390/0x1390
> [  103.623945]  ? kasan_unpoison_shadow+0x31/0x40
> [  103.623970]  ? kmalloc_order+0x63/0x70
> [  103.624337]  amdgpu_driver_load_kms+0xd9/0x430 [amdgpu]
> [  103.624690]  ? amdgpu_register_gpu_instance+0xe0/0xe0 [amdgpu]
> [  103.624756]  ? drm_dev_register+0x19c/0x310 [drm]
> [  103.624768]  ? __kasan_slab_free+0x133/0x160
> [  103.624849]  drm_dev_register+0x1f5/0x310 [drm]
> [  103.625212]  amdgpu_pci_probe+0x109/0x1f0 [amdgpu]
> [  103.625565]  ? amdgpu_pmops_runtime_idle+0xe0/0xe0 [amdgpu]
> [  103.625580]  local_pci_probe+0x74/0xd0
> [  103.625603]  pci_device_probe+0x1fa/0x310
> [  103.625620]  ? pci_device_remove+0x1c0/0x1c0
> [  103.625640]  ? sysfs_do_create_link_sd.isra.2+0x74/0xe0
> [  103.625673]  really_probe+0x367/0x5d0
> [  103.625700]  driver_probe_device+0x177/0x1b0
> [  103.625721]  device_driver_attach+0x8a/0x90
> [  103.625737]  ? device_driver_attach+0x90/0x90
> [  103.625746]  __driver_attach+0xeb/0x190
> [  103.625765]  ? device_driver_attach+0x90/0x90
> [  103.625773]  bus_for_each_dev+0xe4/0x160
> [  103.625789]  ? subsys_dev_iter_exit+0x10/0x10
> [  103.625829]  bus_add_driver+0x277/0x330
> [  103.625855]  driver_register+0xc6/0x1a0
> [  103.625866]  ? 0xa0d88000
> [  103.625880]  do_one_initcall+0xd3/0x334
> [  103.625895]  ? trace_event_raw_event_initcall_finish+0x150/0x150
> [  103.625911]  ? kasan_unpoison_shadow+0x31/0x40
> [  103.625924]  ? __kasan_kmalloc+0xd5/0xf0
> [  103.625946]  ? kmem_cache_alloc_trace+0x154/0x300
> [  103.625955]  ? kasan_unpoison_shadow+0x31/0x40
> [  103.625985]  do_init_module+0xec/0x354
> [  103.626011]  load_module+0x3c91/0x4980
> [  103.626118]  ? module_frob_arch_sections+0x20/0x20
> [  103.626132]  ? ima_read_file+0x10/0x10
> [  103.626142] 

RE: [PATCH v2] drm/amd/display: Modify display link stream setup sequence.

2019-10-17 Thread Liu, Zhan
Thx! Will do it.

Zhan

From: Kazlauskas, Nicholas 
Sent: 2019/October/17, Thursday 4:51 PM
To: Liu, Zhan ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH v2] drm/amd/display: Modify display link stream setup 
sequence.

This is actually setting DIG mode a second time, right? I don't think this is 
what sets GC_SEND.

Please mention that this is setting the DIG_MODE to the correct value after 
having been overridden by the call to transmitter control in your patch 
description. Also correct the HACK comment to mention that this second call is 
needed to reconfigure the DIG as a workaround for the incorrect value being 
applied from transmitter control. Specifics help in source.

I don't think there is a case where we'd want HDMI changed to DVI so it's 
probably fine to leave this as is for now...

With those fixed, you can add my:

Reviewed-by: Nicholas Kazlauskas <nicholas.kazlaus...@amd.com>

Thanks,

Nicholas Kazlauskas


From: amd-gfx <amd-gfx-boun...@lists.freedesktop.org> on behalf of Liu, Zhan <zhan@amd.com>
Sent: Thursday, October 17, 2019, 3:04 PM
To: amd-gfx@lists.freedesktop.org; Liu, 
Zhan
Subject: [PATCH v2] drm/amd/display: Modify display link stream setup sequence.


[Why]
This patch fixes the Navi14 pink screen issue. With this
patch, stream->link->link_enc->funcs->setup will be called
twice: this makes sure GC_SEND is set to 1. We still need
to look into why the issue only happens on Linux but not
on Windows.

[How]
Call stream->link->link_enc->funcs->setup twice.

Signed-off-by: Zhan Liu <zhan@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 935053664160..8683e8613ec2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2842,6 +2842,12 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);

+   /* Hack on Navi14: fixes Navi14 HDMI pink screen issue */
+   if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+   stream->link->link_enc->funcs->setup(
+   stream->link->link_enc,
+   pipe_ctx->stream->signal);
+
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
--
2.17.0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH v2] drm/amd/display: Modify display link stream setup sequence.

2019-10-17 Thread Kazlauskas, Nicholas
This is actually setting DIG mode a second time, right? I don't think this is 
what sets GC_SEND.

Please mention that this is setting the DIG_MODE to the correct value after 
having been overridden by the call to transmitter control in your patch 
description. Also correct the HACK comment to mention that this second call is 
needed to reconfigure the DIG as a workaround for the incorrect value being 
applied from transmitter control. Specifics help in source.

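For instance, the reworked comment might read something like this
(illustrative wording only):

   /*
    * Workaround: the transmitter control call above overrides DIG_MODE
    * with an incorrect value, so call setup again here to reconfigure
    * the DIG to the mode the signal actually requires.
    */
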
I don't think there is a case where we'd want HDMI changed to DVI so it's 
probably fine to leave this as is for now...

With those fixed, you can add my:

Reviewed-by: Nicholas Kazlauskas 

Thanks,

Nicholas Kazlauskas


From: amd-gfx  on behalf of Liu, Zhan 

Sent: Thursday, October 17, 2019, 3:04 PM
To: amd-gfx@lists.freedesktop.org; Liu, Zhan
Subject: [PATCH v2] drm/amd/display: Modify display link stream setup sequence.

[Why]
This patch fixes the Navi14 pink screen issue. With this
patch, stream->link->link_enc->funcs->setup will be called
twice: this makes sure GC_SEND is set to 1. We still need
to look into why the issue only happens on Linux but not
on Windows.

[How]
Call stream->link->link_enc->funcs->setup twice.

Signed-off-by: Zhan Liu 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 935053664160..8683e8613ec2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2842,6 +2842,12 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);

+   /* Hack on Navi14: fixes Navi14 HDMI pink screen issue */
+   if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+   stream->link->link_enc->funcs->setup(
+   stream->link->link_enc,
+   pipe_ctx->stream->signal);
+
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
--
2.17.0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: Spontaneous reboots when using RX 560

2019-10-17 Thread Sylvain Munaut
So a bit more testing.

I was using a bit of "unusual" config I guess, having 2 GPUs and some
other pcie cards (10G, ..).
So I simplified and went to the most standard thing I could think of,
_just_ the RX 560 card plugged into the main PCIe 16x slot directly
connected to the CPU.

And I got exactly the same results, no change in behavior.

So on one hand I'm happy that the other cards and having the AMD GPU
in the second slot isn't the issue (because I really need that config
that way), but on the other, I'm no closer to finding the issue :/

Cheers,

 Sylvain Munaut
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Stack out of bounds in KFD on Arcturus

2019-10-17 Thread Grodzovsky, Andrey
Hey Felix - I see this on boot when working with Arcturus.

Andrey


[  103.602092] kfd kfd: Allocated 3969056 bytes on gart
[  103.610769] 
==
[  103.611469] BUG: KASAN: stack-out-of-bounds in 
kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
[  103.611646] Read of size 4 at addr 8883cb19ee38 by task modprobe/1122

[  103.611836] CPU: 3 PID: 1122 Comm: modprobe Tainted: G   
O  5.3.0-rc3+ #45
[  103.611847] Hardware name: System manufacturer System Product 
Name/Z170-PRO, BIOS 1902 06/27/2016
[  103.611856] Call Trace:
[  103.611879]  dump_stack+0x71/0xab
[  103.611907]  print_address_description+0x1da/0x3c0
[  103.612453]  ? kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
[  103.612479]  __kasan_report+0x13f/0x1a0
[  103.613022]  ? kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
[  103.613580]  ? kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
[  103.613604]  kasan_report+0xe/0x20
[  103.614149]  kfd_create_vcrat_image_gpu+0x5db/0xb80 [amdgpu]
[  103.614762]  ? kfd_fill_gpu_memory_affinity+0x110/0x110 [amdgpu]
[  103.614796]  ? __alloc_pages_nodemask+0x2c9/0x560
[  103.614824]  ? __alloc_pages_slowpath+0x1390/0x1390
[  103.614898]  ? kmalloc_order+0x63/0x70
[  103.615469]  kfd_create_crat_image_virtual+0x70c/0x770 [amdgpu]
[  103.616054]  ? kfd_create_crat_image_acpi+0x1c0/0x1c0 [amdgpu]
[  103.616095]  ? up_write+0x4b/0x70
[  103.616649]  kfd_topology_add_device+0x98d/0xb10 [amdgpu]
[  103.617207]  ? kfd_topology_shutdown+0x60/0x60 [amdgpu]
[  103.617743]  ? start_cpsch+0x2ff/0x3a0 [amdgpu]
[  103.61]  ? mutex_lock_io_nested+0xac0/0xac0
[  103.617807]  ? __mutex_unlock_slowpath+0xda/0x420
[  103.617848]  ? __mutex_unlock_slowpath+0xda/0x420
[  103.617877]  ? wait_for_completion+0x200/0x200
[  103.618461]  ? start_cpsch+0x38b/0x3a0 [amdgpu]
[  103.619011]  ? create_queue_cpsch+0x670/0x670 [amdgpu]
[  103.619573]  ? kfd_iommu_device_init+0x92/0x1e0 [amdgpu]
[  103.620112]  ? kfd_iommu_resume+0x2c/0x2c0 [amdgpu]
[  103.620655]  ? kfd_iommu_check_device+0xf0/0xf0 [amdgpu]
[  103.621228]  kgd2kfd_device_init+0x474/0x870 [amdgpu]
[  103.621781]  amdgpu_amdkfd_device_init+0x291/0x390 [amdgpu]
[  103.622329]  ? amdgpu_amdkfd_device_probe+0x90/0x90 [amdgpu]
[  103.622344]  ? kmsg_dump_rewind_nolock+0x59/0x59
[  103.622895]  ? amdgpu_ras_eeprom_test+0x71/0x90 [amdgpu]
[  103.623424]  amdgpu_device_init+0x1bbe/0x2f00 [amdgpu]
[  103.623819]  ? amdgpu_device_has_dc_support+0x30/0x30 [amdgpu]
[  103.623842]  ? __isolate_free_page+0x290/0x290
[  103.623852]  ? fs_reclaim_acquire.part.97+0x5/0x30
[  103.623891]  ? __alloc_pages_nodemask+0x2c9/0x560
[  103.623912]  ? __alloc_pages_slowpath+0x1390/0x1390
[  103.623945]  ? kasan_unpoison_shadow+0x31/0x40
[  103.623970]  ? kmalloc_order+0x63/0x70
[  103.624337]  amdgpu_driver_load_kms+0xd9/0x430 [amdgpu]
[  103.624690]  ? amdgpu_register_gpu_instance+0xe0/0xe0 [amdgpu]
[  103.624756]  ? drm_dev_register+0x19c/0x310 [drm]
[  103.624768]  ? __kasan_slab_free+0x133/0x160
[  103.624849]  drm_dev_register+0x1f5/0x310 [drm]
[  103.625212]  amdgpu_pci_probe+0x109/0x1f0 [amdgpu]
[  103.625565]  ? amdgpu_pmops_runtime_idle+0xe0/0xe0 [amdgpu]
[  103.625580]  local_pci_probe+0x74/0xd0
[  103.625603]  pci_device_probe+0x1fa/0x310
[  103.625620]  ? pci_device_remove+0x1c0/0x1c0
[  103.625640]  ? sysfs_do_create_link_sd.isra.2+0x74/0xe0
[  103.625673]  really_probe+0x367/0x5d0
[  103.625700]  driver_probe_device+0x177/0x1b0
[  103.625721]  device_driver_attach+0x8a/0x90
[  103.625737]  ? device_driver_attach+0x90/0x90
[  103.625746]  __driver_attach+0xeb/0x190
[  103.625765]  ? device_driver_attach+0x90/0x90
[  103.625773]  bus_for_each_dev+0xe4/0x160
[  103.625789]  ? subsys_dev_iter_exit+0x10/0x10
[  103.625829]  bus_add_driver+0x277/0x330
[  103.625855]  driver_register+0xc6/0x1a0
[  103.625866]  ? 0xa0d88000
[  103.625880]  do_one_initcall+0xd3/0x334
[  103.625895]  ? trace_event_raw_event_initcall_finish+0x150/0x150
[  103.625911]  ? kasan_unpoison_shadow+0x31/0x40
[  103.625924]  ? __kasan_kmalloc+0xd5/0xf0
[  103.625946]  ? kmem_cache_alloc_trace+0x154/0x300
[  103.625955]  ? kasan_unpoison_shadow+0x31/0x40
[  103.625985]  do_init_module+0xec/0x354
[  103.626011]  load_module+0x3c91/0x4980
[  103.626118]  ? module_frob_arch_sections+0x20/0x20
[  103.626132]  ? ima_read_file+0x10/0x10
[  103.626142]  ? vfs_read+0x127/0x190
[  103.626163]  ? kernel_read+0x95/0xb0
[  103.626187]  ? kernel_read_file+0x1a5/0x340
[  103.626277]  ? __do_sys_finit_module+0x175/0x1b0
[  103.626287]  __do_sys_finit_module+0x175/0x1b0
[  103.626301]  ? __ia32_sys_init_module+0x40/0x40
[  103.626338]  ? lock_downgrade+0x390/0x390
[  103.626396]  ? vtime_user_exit+0xc8/0xe0
[  103.626423]  do_syscall_64+0x7d/0x250
[  103.626440]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
[  103.626450] RIP: 0033:0x7f09984854d9
[  103.626461] Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 
48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 

Re: [RFC] drm: Add AMD GFX9+ format modifiers.

2019-10-17 Thread Marek Olšák
On Wed, Oct 16, 2019 at 9:48 AM Bas Nieuwenhuizen 
wrote:

> This adds initial format modifiers for AMD GFX9 and newer GPUs.
>
> This is particularly useful to determine if we can use DCC, and whether
> we need an extra display compatible DCC metadata plane.
>
> Design decisions:
>   - Always expose a single plane
>This way everything works correctly with images with multiple
> planes.
>
>   - Do not add an extra memory region in DCC for putting a bit on whether
> we are in compressed state.
>A decompress on import is cheap enough if already decompressed, and
>I do think in most cases we can avoid it in advance during modifier
>negotiation. The remainder is probably not common enough to worry
>about.
>
>   - Explicitly define the sizes as part of the modifier description instead
> of using whatever the current version of radeonsi does.
>This way we can avoid dedicated buffers and we can make sure we keep
>compatibility across mesa versions. I'd like to put some tests on
>this on ac_surface.c so we can learn early in the process if things
>need to be changed. Furthermore, the lack of configurable strides on
>GFX10 means things already go wrong if we do not agree, making a
>custom stride somewhat less useful.
>

The custom stride will be back for 2D images (not for 3D/Array), so
Navi10-14 will be the only hw not supporting the custom stride for 2D. It
might not be worth adding the width and height into the modifier just
because of Navi10-14, though I don't feel strongly about it.

This patch doesn't add the sizes into the description anyway.

The rest looks good.

Marek


>
>   - No usage of BO metadata at all for modifier usecases.
>To avoid the requirement of dedicated dma bufs per image. For
>non-modifier based interop we still use the BO metadata, since we
>need to keep compatibility with old mesa and this is used for
>depth/msaa/3d/CL etc. API interop.
>
>   - A single FD for all planes.
>Easier in Vulkan / bindless and radeonsi is already transitioning.
>
>   - Make a single modifier for DCN1
>   It defines things uniquely given bpp, which we can assume, so adding
>   more modifier values do not add clarity.
>
>   - Not exposing the 4K and 256B tiling modes.
>   These are largely only better for something like a cursor or very
> long
>   and/or tall images. Are they worth the added complexity to save
> memory?
>   For context, at 32bpp, tiles are 128x128 pixels.
>
>   - For multiplane images, every plane uses the same tiling.
>   On GFX9/GFX10 we can, so no need to make it complicated.
>
>   - We use family_id + external_rev to distinguish between incompatible
> GPUs.
>   PCI ID is not enough, as RAVEN and RAVEN2 have the same PCI device
> id,
>   but different tiling. We might be able to find bigger equivalence
>   groups for _X, but especially for DCC I would be uncomfortable
> making it
>   shared between GPUs.
>
>   - For DCN1 DCC, radeonsi currently uses another texelbuffer with indices
> to reorder. This is not shared.
>   Specific to current implementation and does not need to be shared. To
>   pave the way to shader-based solution, lets keep this internal to
> each
>   driver. This should reduce the modifier churn if any of the driver
>   implementations change. (Especially as you'd want to support the old
>   implementation for a while to stay compatible with old kernels not
>   supporting a new modifier yet).
>
>   - No support for rotated swizzling.
>   Can be added easily later and nothing in the stack would generate it
>   currently.
>
>   - Add extra enum values in the definitions.
>   This way we can easily switch on modifier without having to pass
> around
>   the current GPU everywhere, assuming the modifier has been validated.
> ---
>
>  Since my previous attempt for modifiers got bogged down on details for
>  the GFX6-GFX8 modifiers in previous discussions, this only attempts to
>  define modifiers for GFX9+, which is significantly simpler.
>
>  For a final version I'd like to wait until I have written most of the
>  userspace + kernelspace so we can actually test it. However, I'd
>  appreciate any early feedback people are willing to give.
>
>  Initial Mesa amd/common support + tests are available at
>  https://gitlab.freedesktop.org/bnieuwenhuizen/mesa/tree/modifiers
>
>  I tested the HW to actually behave as described in the descriptions
>  on Raven and plan to test on a subset of the others.
>
>  include/uapi/drm/drm_fourcc.h | 118 ++
>  1 file changed, 118 insertions(+)
>
> diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
> index 3feeaa3f987a..9bd286ab2bee 100644
> --- a/include/uapi/drm/drm_fourcc.h
> +++ b/include/uapi/drm/drm_fourcc.h
> @@ -756,6 +756,124 @@ extern "C" {
>   */
>  #define 

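The patch body is truncated in the archive. As a purely hypothetical
illustration of the kind of encoding described above (not the RFC's actual
bit layout), a GFX9+ modifier could pack its fields into the vendor-specific
bits like this:

   /* hypothetical field layout, for illustration only */
   #define AMD_MOD_SWIZZLE(v)  ((__u64)(v) & 0x1f)          /* swizzle mode */
   #define AMD_MOD_DCC(v)      (((__u64)(v) & 0x1) << 5)    /* extra DCC plane */
   #define AMD_MOD_GPU_ID(v)   (((__u64)(v) & 0xffff) << 6) /* family_id + external_rev */

   #define AMD_MOD(swz, dcc, gpu) \
           fourcc_mod_code(AMD, AMD_MOD_SWIZZLE(swz) | \
                                AMD_MOD_DCC(dcc) | AMD_MOD_GPU_ID(gpu))

fourcc_mod_code() and the AMD vendor id already exist in drm_fourcc.h; the
field names and widths above are invented for illustration.
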
Re: [PATCH] drm/amdgpu/vi: silence an uninitialized variable warning

2019-10-17 Thread Alex Deucher
On Thu, Oct 17, 2019 at 5:12 AM Dan Carpenter  wrote:
>
> Smatch complains that we need to initialize "*cap", otherwise it can
> lead to an uninitialized variable bug in the caller.  This seems like a
> reasonable warning and it doesn't hurt to silence it at least.
>
> drivers/gpu/drm/amd/amdgpu/vi.c:767 vi_asic_reset_method() error: 
> uninitialized symbol 'baco_reset'.
>
> Fixes: 425db2553e43 ("drm/amdgpu: expose BACO interfaces to upper level from 
> PP")
> Signed-off-by: Dan Carpenter 

Applied.  thanks!

Alex

> ---
>  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 1 +
>  1 file changed, 1 insertion(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c 
> b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> index 83196b79edd5..f4ff15378e61 100644
> --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> @@ -1421,6 +1421,7 @@ static int pp_get_asic_baco_capability(void *handle, 
> bool *cap)
>  {
> struct pp_hwmgr *hwmgr = handle;
>
> +   *cap = false;
> if (!hwmgr)
> return -EINVAL;
>
> --
> 2.20.1
>
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 37/37] drm/amd/display: Apply vactive dram clock change workaround to dcn2 DMLv2

2019-10-17 Thread sunpeng.li
From: Joshua Aberback 

[Why]
This workaround was put in dcn2 DMLv1, and now we need it in DMLv2.

Signed-off-by: Joshua Aberback 
Reviewed-by: Jun Lei 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 841ed6c23f93..3c70dd577292 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2611,7 +2611,8 @@ static void 
dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
 
-   if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+   if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+   mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = 
dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || 
mode_lib->vba.NumberOfActivePlanes == 1) {
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 29/37] drm/amd/display: Only use EETF when maxCL > max display

2019-10-17 Thread sunpeng.li
From: Krunoslav Kovac 

[Why]
BT.2390 EETF is used for tone mapping/range reduction.
Say display is 0.1 - 500 nits.
The problematic case is when content is 0-400. We apply EETF because
0<0.1 so we need to reduce the range by 0.1.

In the commit, we ignore the bottom range. Most displays map 0 to min and
then have a ramp to 0.1, so sending 0.1 is actually >0.1.
Furthermore, HW that uses 3D LUT also assumes min=0.

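For reference, the BT.2390 EETF shape in question, written in PQ-normalized
luminance (my paraphrase of the spec, not text from the patch). The
bottom-end adaptation this change drops is the black-level lift

   E2 = E1 + b * (1 - E1)^4,        b = normalized min luminance

while the top-end roll-off it keeps is the Hermite spline above the knee
KS = 1.5 * maxLum - 0.5, with T = (E1 - KS) / (1 - KS):

   P(E1) = (2T^3 - 3T^2 + 1) * KS
         + (T^3 - 2T^2 + T) * (1 - KS)
         + (-2T^3 + 3T^2) * maxLum

Skipping the lift term is what lets min map straight to 0, matching the
3D LUT hardware assumption mentioned above.
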
Signed-off-by: Krunoslav Kovac 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 6 +-
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 85dad356c9d5..1de4805cb8c7 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -959,11 +959,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex 
*rgb_regamma,
if (fs_params->max_display < 100) // cap at 100 at the top
max_display = dc_fixpt_from_int(100);
 
-   if (fs_params->min_content < fs_params->min_display)
-   use_eetf = true;
-   else
-   min_content = min_display;
-
+   // only max used, we don't adjust min luminance
if (fs_params->max_content > fs_params->max_display)
use_eetf = true;
else
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 27/37] drm/amd/display: do not synchronize "drr" displays

2019-10-17 Thread sunpeng.li
From: Jun Lei 

[why]
A display that supports DRR can never really be considered
"synchronized" with any other display, because we can dynamically
enable DRR (i.e. without a modeset). This will cause their
relative CRTC positions to drift and lose sync, which will disrupt
features such as MCLK switching that assume and depend on
their permanent alignment (which can only change with a modeset).

[how]
Check for ignore_msa in the stream when considering synchronizability;
this ignore_msa is effectively implemented as "supports drr".

Signed-off-by: Jun Lei 
Reviewed-by: Yongqiang Sun 
Acked-by: Anthony Koo 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index da9e2e5f5c0d..8fe39fdefc27 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -404,6 +404,9 @@ bool resource_are_streams_timing_synchronizable(
if (stream1->view_format != stream2->view_format)
return false;
 
+   if (stream1->ignore_msa_timing_param || 
stream2->ignore_msa_timing_param)
+   return false;
+
return true;
 }
 static bool is_dp_and_hdmi_sharable(
@@ -1540,6 +1543,9 @@ bool dc_is_stream_unchanged(
if (!are_stream_backends_same(old_stream, stream))
return false;
 
+   if (old_stream->ignore_msa_timing_param != 
stream->ignore_msa_timing_param)
+   return false;
+
return true;
 }
 
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 16/37] drm/amd/display: audio endpoint cannot switch

2019-10-17 Thread sunpeng.li
From: Paul Hsieh 

[Why]
On some systems, we need to check the DCN version at runtime,
not at compile time.

[How]
Pass the DCN version parameter into find_first_free_audio.

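A hypothetical illustration of the idea (the diff below only shows the
plumbing): a decision that used to hide behind a compile-time check can now
branch on the version DC reports at runtime, e.g.:

   /* hypothetical: decide at runtime instead of compile time */
   static bool audio_endpoint_can_switch(enum dce_version v)
   {
           return v < DCN_VERSION_2_1; /* assumption for illustration */
   }
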
Signed-off-by: Paul Hsieh 
Reviewed-by: Charlene Liu 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 25da0c45d828..da9e2e5f5c0d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1629,7 +1629,8 @@ static int acquire_first_free_pipe(
 static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
-   enum engine_id id)
+   enum engine_id id,
+   enum dce_version dc_version)
 {
int i, available_audio_count;
 
@@ -1965,7 +1966,7 @@ enum dc_status resource_map_pool_resources(
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
stream->audio_info.mode_count && stream->audio_info.flags.all) {
pipe_ctx->stream_res.audio = find_first_free_audio(
-   &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
+   &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, 
dc_ctx->dce_version);
 
/*
 * Audio assigned in order first come first get.
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 11/37] drm/amd/display: split dcn20 fast validate into more functions

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

Split a large function into smaller, reusable chunks.

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Nevenko Stupar 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 182 ++
 .../drm/amd/display/dc/dcn20/dcn20_resource.h |  31 +++
 .../drm/amd/display/dc/dcn21/dcn21_resource.c |   1 +
 3 files changed, 136 insertions(+), 78 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 893e44ca90f8..ab1fc8c5ed10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1612,7 +1612,7 @@ static void swizzle_to_dml_params(
}
 }
 
-static bool dcn20_split_stream_for_odm(
+bool dcn20_split_stream_for_odm(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
@@ -1690,7 +1690,7 @@ static bool dcn20_split_stream_for_odm(
return true;
 }
 
-static void dcn20_split_stream_for_mpc(
+void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,
@@ -2148,7 +2148,7 @@ void dcn20_set_mcif_arb_params(
 }
 
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
 {
int i;
 
@@ -2183,7 +2183,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct 
dc_state *new_ctx)
 }
 #endif
 
-static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe)
@@ -2260,24 +2260,11 @@ static struct pipe_ctx 
*dcn20_find_secondary_pipe(struct dc *dc,
return secondary_pipe;
 }
 
-bool dcn20_fast_validate_bw(
+void dcn20_merge_pipes_for_validate(
struct dc *dc,
-   struct dc_state *context,
-   display_e2e_pipe_params_st *pipes,
-   int *pipe_cnt_out,
-   int *pipe_split_from,
-   int *vlevel_out)
+   struct dc_state *context)
 {
-   bool out = false;
-
-   int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
-   bool force_split = false;
-   int split_threshold = dc->res_pool->pipe_count / 2;
-   bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
-
-   ASSERT(pipes);
-   if (!pipes)
-   return false;
+   int i;
 
/* merge previously split odm pipes since mode support needs to make 
the decision */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2332,31 +2319,18 @@ bool dcn20_fast_validate_bw(
if (pipe->plane_state)
resource_build_scaling_params(pipe);
}
+}
 
-   if (dc->res_pool->funcs->populate_dml_pipes)
-   pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
-   &context->res_ctx, pipes);
-   else
-   pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
-   &context->res_ctx, pipes);
-
-   *pipe_cnt_out = pipe_cnt;
-
-   if (!pipe_cnt) {
-   out = true;
-   goto validate_out;
-   }
-
-   vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
-   if (vlevel > context->bw_ctx.dml.soc.num_states)
-   goto validate_fail;
-
-   /*initialize pipe_just_split_from to invalid idx*/
-   for (i = 0; i < MAX_PIPES; i++)
-   pipe_split_from[i] = -1;
+int dcn20_validate_apply_pipe_split_flags(
+   struct dc *dc,
+   struct dc_state *context,
+   int vlevel,
+   bool *split)
+{
+   int i, pipe_idx, vlevel_unsplit;
+   bool force_split = false;
+   bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
 
-   /* Single display only conditionals get set here */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
bool exit_loop = false;
@@ -2383,38 +2357,105 @@ bool dcn20_fast_validate_bw(
if (exit_loop)
break;
}
-
-   if (context->stream_count > split_threshold)
+   /* TODO: fix dc bugs and remove this split threshold thing */
+   if (context->stream_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
 
-   vlevel_unsplit = vlevel;
+   if (avoid_split) {
+   for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+   if (!context->res_ctx.pipe_ctx[i].stream)
+   continue;
+
+   for 

[PATCH 15/37] drm/amd/display: Fix MPO & pipe split on 3-pipe dcn2x

2019-10-17 Thread sunpeng.li
From: Michael Strauss 

[WHY]
DML is incorrectly initialized with 4 pipes on 3 pipe configs
RequiredDPPCLK is halved on unsplit pipe due to an incorrectly handled 3 pipe
case, causing underflow with 2 planes & pipe split (MPO, 8K + 2nd display)

[HOW]
Set correct number of DPP/OTGs for dml init to generate correct DPP topology
Double RequiredDPPCLK after the clock is halved for pipe split
but find_secondary_pipe fails, to fix the underflow

Signed-off-by: Michael Strauss 
Reviewed-by: Dmytro Laktyushkin 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 5 +++--
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 8 
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 9bc0ffad7093..2596d4ac6263 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2482,9 +2482,10 @@ bool dcn20_fast_validate_bw(
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, 
&context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
-   if (!hsplit_pipe)
+   if (!hsplit_pipe) {
+   
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]
 *= 2;
continue;
-
+   }
if 
(context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
&context->res_ctx, 
dc->res_pool,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index d2e851e7a97f..5e3b48bb04f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1722,6 +1722,14 @@ static bool construct(
 
pool->base.pp_smu = dcn21_pp_smu_create(ctx);
 
+   uint32_t num_pipes = dcn2_1_ip.max_num_dpp;
+
+   for (i = 0; i < dcn2_1_ip.max_num_dpp; i++)
+   if (pipe_fuses & 1 << i)
+   num_pipes--;
+   dcn2_1_ip.max_num_dpp = num_pipes;
+   dcn2_1_ip.max_num_otg = num_pipes;
+
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
 
init_data.ctx = dc->ctx;
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 26/37] drm/amd/display: Proper return of result when aux engine acquire fails

2019-10-17 Thread sunpeng.li
From: Anthony Koo 

[Why]
When aux engine acquire fails, we missed populating the operation_result
that describes the failure reason.

[How]
Set operation_result to the new type
AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE
in the case where aux engine acquire has failed.

Signed-off-by: Anthony Koo 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 1 +
 drivers/gpu/drm/amd/display/dc/dc_ddc_types.h   | 3 ++-
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c| 5 -
 3 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 3af2b429ff1b..779d0b60cac9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -113,6 +113,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
result = -EIO;
break;
case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+   case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
result = -EBUSY;
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h 
b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 4ef97f65e55d..4f8f576d5fcf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -49,7 +49,8 @@ enum aux_channel_operation_result {
AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
-   AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+   AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON,
+   AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE
 };
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 976bd4987a28..22abb345ddc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -538,8 +538,10 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
memset(_rep, 0, sizeof(aux_rep));
 
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
-   if (!acquire(aux_engine, ddc_pin))
+   if (!acquire(aux_engine, ddc_pin)) {
+   *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE;
return -1;
+   }
 
if (payload->i2c_over_aux)
aux_req.type = AUX_TRANSACTION_TYPE_I2C;
@@ -663,6 +665,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
 
case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+   case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
default:
goto fail;
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 09/37] drm/amd/display: Enable PSR

2019-10-17 Thread sunpeng.li
From: Roman Li 

[Why]
PSR (Panel Self-Refresh) is a power-saving feature for eDP panels.
The feature has support in DMCU (Display MicroController Unit).
DMCU/driver communication is implemented in DC.
DM can use existing DC PSR interface to use PSR feature.

[How]
 - Read PSR caps via DPCD (a hedged sketch follows this list)
 - Send VSC infoframe if the panel supports PSR
 - Disable PSR before h/w programming (FULL_UPDATE)
 - Enable PSR after h/w programming
 - Disable PSR for fb console
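
A rough illustration of the first item (a hedged sketch, not the
patch's actual amdgpu_dm_set_psr_caps body; DP_PSR_SUPPORT is the
standard DPCD offset from drm_dp_helper.h):

static void read_psr_caps_sketch(struct dc_link *link)
{
	uint8_t dpcd_data = 0;

	/* DPCD 0x070 reports the sink's PSR version; 0 means no PSR */
	if (dm_helpers_dp_read_dpcd(link->ctx, link, DP_PSR_SUPPORT,
			&dpcd_data, sizeof(dpcd_data)))
		link->psr_feature_enabled = dpcd_data != 0;
}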

Signed-off-by: Roman Li 
Reviewed-by: Nicholas Kazlauskas 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 133 +-
 1 file changed, 130 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 10cce584719f..5e3bf4f86e52 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -147,6 +147,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 static void handle_cursor_update(struct drm_plane *plane,
 struct drm_plane_state *old_plane_state);
 
+static void amdgpu_dm_set_psr_caps(struct dc_link *link);
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+
+
 /*
  * dm_vblank_get_counter
  *
@@ -2418,6 +2424,7 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
+   amdgpu_dm_set_psr_caps(link);
}
 
 
@@ -3813,7 +3820,16 @@ create_stream_for_sink(struct amdgpu_dm_connector 
*aconnector,
 
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, >vsp_infopacket, 
false, false);
+   if (stream->link->psr_feature_enabled)  {
+   struct dc  *core_dc = stream->link->ctx->dc;
 
+   if (dc_is_dmcu_initialized(core_dc)) {
+   struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+   stream->psr_version = dmcu->dmcu_version.psr_version;
+   mod_build_vsc_infopacket(stream, 
>vsc_infopacket);
+   }
+   }
 finish:
dc_sink_release(sink);
 
@@ -5908,6 +5924,7 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
+   bool swizzle = true;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -5953,6 +5970,9 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 
dc_plane = dm_new_plane_state->dc_state;
 
+   if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
+   swizzle = false;
+
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = 
dc_plane->gamma_correction;
@@ -6144,14 +6164,29 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
_state->vrr_params.adjust);
spin_unlock_irqrestore(>dev->event_lock, flags);
}
-
mutex_lock(>dc_lock);
+   if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+   acrtc_state->stream->link->psr_allow_active)
+   amdgpu_dm_psr_disable(acrtc_state->stream);
+
dc_commit_updates_for_stream(dm->dc,
 bundle->surface_updates,
 planes_count,
 acrtc_state->stream,
 >stream_update,
 dc_state);
+
+   if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+   
acrtc_state->stream->psr_version &&
+   
!acrtc_state->stream->link->psr_feature_enabled)
+   amdgpu_dm_link_setup_psr(acrtc_state->stream);
+   else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
+   
acrtc_state->stream->link->psr_feature_enabled &&
+   
!acrtc_state->stream->link->psr_allow_active &&
+   swizzle) {
+ 

[PATCH 06/37] drm/amd/display: remove unused code

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

Commit hints are unnecessary after front end programming redesign.

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Eric Bernstein 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  | 2 --
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 5 -
 drivers/gpu/drm/amd/display/dc/inc/core_types.h   | 4 
 3 files changed, 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 41b51f43a64b..55b82ca44c3b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1248,8 +1248,6 @@ static enum dc_status dc_commit_state_no_check(struct dc 
*dc, struct dc_state *c
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
 
-   memset(>commit_hints, 0, sizeof(context->commit_hints));
-
dc_release_state(dc->current_state);
 
dc->current_state = context;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index c9792c47978a..893e44ca90f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2275,7 +2275,6 @@ bool dcn20_fast_validate_bw(
int split_threshold = dc->res_pool->pipe_count / 2;
bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
 
-
ASSERT(pipes);
if (!pipes)
return false;
@@ -2353,10 +2352,6 @@ bool dcn20_fast_validate_bw(
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
 
-   if ((context->stream_count > split_threshold && 
dc->current_state->stream_count <= split_threshold)
-   || (context->stream_count <= split_threshold && 
dc->current_state->stream_count > split_threshold))
-   context->commit_hints.full_update_needed = true;
-
/*initialize pipe_just_split_from to invalid idx*/
for (i = 0; i < MAX_PIPES; i++)
pipe_split_from[i] = -1;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h 
b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index eee78a73d88c..a831079607cd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -398,10 +398,6 @@ struct dc_state {
 
struct clk_mgr *clk_mgr;
 
-   struct {
-   bool full_update_needed : 1;
-   } commit_hints;
-
struct kref refcount;
 };
 
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 31/37] drm/amd/display: 3.2.56

2019-10-17 Thread sunpeng.li
From: Aric Cyr 

Signed-off-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index b578b2148e45..0416a17b0897 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.55"
+#define DC_VER "3.2.56"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 12/37] drm/amd/display: correctly initialize dml odm variables

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

One of the ODM variables (ODMCombineTypeEnabled) was not initialized in dml.

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Chris Park 
Acked-by: Leo Li 
Acked-by: Tony Cheng 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 6 --
 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2 ++
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index ab1fc8c5ed10..4f9c3538fa8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2524,7 +2524,7 @@ bool dcn20_fast_validate_bw(
return out;
 }
 
-void dcn20_calculate_wm(
+static void dcn20_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index fe68669a1f0c..dccfe07832e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -150,12 +150,6 @@ void dcn20_calculate_dlg_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel);
-void dcn20_calculate_wm(
-   struct dc *dc, struct dc_state *context,
-   display_e2e_pipe_params_st *pipes,
-   int *out_pipe_cnt,
-   int *pipe_split_from,
-   int vlevel);
 
 enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct 
dc_state *context, struct dc_stream_state *stream);
 enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state 
*new_ctx, struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 362dc6ea98ae..038701d7383d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -432,6 +432,8 @@ static void fetch_pipe_params(struct display_mode_lib 
*mode_lib)
dst->recout_width; // TODO: or should this be 
full_recout_width???...maybe only when in hsplit mode?

mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
dst->odm_combine;
+   
mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] =
+   dst->odm_combine;
mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum output_format_class) 
(dout->output_format);
mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 34/37] drm/amd/display: Do not call update bounding box on dc create

2019-10-17 Thread sunpeng.li
From: Sung Lee 

[Why]
In Hybrid Graphics, the dcn2_1_soc struct stays alive through PnP.
This causes an issue on dc init, where the dcn2_1_soc that has been
updated by update_bw_bounding_box gets put into dml->soc.
As update_bw_bounding_box is currently incorrect for dcn2.1,
this makes dml calculations fail due to incorrect parameters,
leading to a crash on PnP.

[How]
Comment out update_bw_bounding_box call for now.

Signed-off-by: Sung Lee 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index f165f7e58da9..88f89d073061 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1336,6 +1336,12 @@ struct display_stream_compressor *dcn21_dsc_create(
 
 static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params 
*bw_params)
 {
+   /*
+   TODO: Fix this function to calculate correct values.
+   There are known issues with this function currently
+   that will need to be investigated. Use hardcoded known good values for 
now.
+
+
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = _params->clk_table;
int i;
@@ -1350,11 +1356,11 @@ static void update_bw_bounding_box(struct dc *dc, 
struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[i].dcfclk_mhz = 
clk_table->entries[i].dcfclk_mhz;
dcn2_1_soc.clock_limits[i].fabricclk_mhz = 
clk_table->entries[i].fclk_mhz;
dcn2_1_soc.clock_limits[i].socclk_mhz = 
clk_table->entries[i].socclk_mhz;
-   /* This is probably wrong, TODO: find correct calculation */
dcn2_1_soc.clock_limits[i].dram_speed_mts = 
clk_table->entries[i].memclk_mhz * 16 / 1000;
}
dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
dcn2_1_soc.num_states = i;
+   */
 }
 
 /* Temporary Place holder until we can get them from fuse */
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 36/37] drm/amd/display: fix hubbub deadline programming

2019-10-17 Thread sunpeng.li
From: Eric Yang 

[Why]
Fix the programming of DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, which
was not being filled in.

Signed-off-by: Eric Yang 
Reviewed-by: Dmytro Laktyushkin 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1 +
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 3cdb61750570..5e3738e96fdc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2634,6 +2634,7 @@ static void dcn20_calculate_wm(
 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = 
get_fraction_of_urgent_bandwidth(>bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = 
get_fraction_of_urgent_bandwidth_imm_flip(>bw_ctx.dml, pipes, 
pipe_cnt) * 1000;
+   context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = 
get_urgent_latency(>bw_ctx.dml, pipes, pipe_cnt) * 1000;
 #endif
 
if (vlevel < 2) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 88f89d073061..12a657692d6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1009,6 +1009,7 @@ static void calculate_wm_set_for_vlevel(
 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, 
pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = 
get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+   wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 
1000;
 #endif
dml->soc.dram_clock_change_latency_us = 
dram_clock_change_latency_cached;
 
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 35/37] drm/amd/display: fix avoid_split for dcn2+ validation

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

We currently process avoid_split incorrectly at the highest voltage
level: when no level lets a pipe run unsplit, the search leaves vlevel
past the last valid state instead of falling back (see the sketch
below).
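
A simplified sketch of the corrected search (hedged, simplified names;
NoOfDPP mirrors context->bw_ctx.dml.vba.NoOfDPP):

int v, vlevel_split = vlevel;

for (v = vlevel; v <= num_states; v++)
	if (NoOfDPP[v][0][pipe_idx] == 1)
		break;		/* lowest level where the pipe stays unsplit */
if (v > num_states)
	v = vlevel_split;	/* no such level: keep the original vlevel */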

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Eric Bernstein 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index a1b2db8f687a..3cdb61750570 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2357,10 +2357,11 @@ int dcn20_validate_apply_pipe_split_flags(
int vlevel,
bool *split)
 {
-   int i, pipe_idx, vlevel_unsplit;
+   int i, pipe_idx, vlevel_split;
bool force_split = false;
bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
 
+   /* Single display loop, exits if there is more than one display */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = >res_ctx.pipe_ctx[i];
bool exit_loop = false;
@@ -2391,22 +2392,24 @@ int dcn20_validate_apply_pipe_split_flags(
if (context->stream_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
 
+   /* Avoid split loop looks for lowest voltage level that allows most 
unsplit pipes possible */
if (avoid_split) {
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
 
-   for (vlevel_unsplit = vlevel; vlevel <= 
context->bw_ctx.dml.soc.num_states; vlevel++)
+   for (vlevel_split = vlevel; vlevel <= 
context->bw_ctx.dml.soc.num_states; vlevel++)
if 
(context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
break;
/* Impossible to not split this pipe */
-   if (vlevel == context->bw_ctx.dml.soc.num_states)
-   vlevel = vlevel_unsplit;
+   if (vlevel > context->bw_ctx.dml.soc.num_states)
+   vlevel = vlevel_split;
pipe_idx++;
}
context->bw_ctx.dml.vba.maxMpcComb = 0;
}
 
+   /* Split loop sets which pipe should be split based on dml outputs and 
dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = >res_ctx.pipe_ctx[i];
 
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 07/37] drm/amd/display: 3.2.55

2019-10-17 Thread sunpeng.li
From: Aric Cyr 

Signed-off-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 2e1d34882684..a86dad3808b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.54"
+#define DC_VER "3.2.55"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 21/37] drm/amd/display: add embedded flag to dml

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Eric Bernstein 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1 +
 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 1 +
 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 1 +
 3 files changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 83f84cdd4055..cfacd6027467 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -318,6 +318,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
+   unsigned char embedded;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 038701d7383d..7f9a5621922f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -375,6 +375,7 @@ static void fetch_pipe_params(struct display_mode_lib 
*mode_lib)
 
mode_lib->vba.pipe_plane[j] = 
mode_lib->vba.NumberOfActivePlanes;
 
+   mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] 
= dst->embedded;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 
1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 91decac50557..1540ffbe3979 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -387,6 +387,7 @@ struct vba_vars_st {
 
/* vba mode support */
/*inputs*/
+   bool EmbeddedPanel[DC__NUM_DPP__MAX];
bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
double MaxHSCLRatio;
double MaxVSCLRatio;
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 32/37] drm/amd/display: take signal type from link

2019-10-17 Thread sunpeng.li
From: Lewis Huang 

[Why]
The signal is updated to EDP when the driver disables the first
encoder. The following encoders then use SIGNAL_TYPE_EDP to handle
other devices. When an encoder's signal is actually HDMI, the driver
detects it as DP and releases the PHY. This causes a hw hang.

[How]
Take the signal type from link->connector_signal.

Signed-off-by: Lewis Huang 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/dce110/dce110_hw_sequencer.c   | 11 +++
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 9150e546dcf2..f0e837d14000 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1421,8 +1421,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 static void power_down_encoders(struct dc *dc)
 {
int i;
-   enum connector_id connector_id;
-   enum signal_type signal = SIGNAL_TYPE_NONE;
 
/* do not know BIOS back-front mapping, simply blank all. It will not
 * hurt for non-DP
@@ -1433,15 +1431,12 @@ static void power_down_encoders(struct dc *dc)
}
 
for (i = 0; i < dc->link_count; i++) {
-   connector_id = 
dal_graphics_object_id_get_connector_id(dc->links[i]->link_id);
-   if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
-   (connector_id == CONNECTOR_ID_EDP)) {
+   enum signal_type signal = dc->links[i]->connector_signal;
 
+   if ((signal == SIGNAL_TYPE_EDP) ||
+   (signal == SIGNAL_TYPE_DISPLAY_PORT))
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
-   if (connector_id == CONNECTOR_ID_EDP)
-   signal = SIGNAL_TYPE_EDP;
-   }
 
dc->links[i]->link_enc->funcs->disable_output(
dc->links[i]->link_enc, signal);
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 33/37] drm/amd/display: Add center mode for integer scaling in DC

2019-10-17 Thread sunpeng.li
From: Reza Amini 

[why]
We want to use the maximum space on the display to show the source.

[how]
For centered mode: replicate the source as many times as possible to
use the maximum of the display's active space, and add borders (a
worked example follows).
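
For example (assumed numbers): a 640x480 source on a 1920x1200 timing
replicates min(1920/640, 1200/480) = min(3, 2) = 2 times per axis,
giving a 1280x960 destination centered at (320, 120):

unsigned int m = min(h_addressable / src_w, v_addressable / src_h); /* 2 */
dst_w = m * src_w;			/* 1280 */
dst_h = m * src_h;			/* 960  */
dst_x = (h_addressable - dst_w) / 2;	/* 320  */
dst_y = (v_addressable - dst_h) / 2;	/* 120  */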

Signed-off-by: Reza Amini 
Reviewed-by: Anthony Koo 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/core/dc_resource.c | 43 +++
 1 file changed, 35 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8fe39fdefc27..70e601a975df 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -951,7 +951,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx 
*pipe_ctx)
data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, 
data->ratios.vert_c);
 
 }
-static bool are_rect_integer_multiples(struct rect src, struct rect dest)
+static bool are_rects_integer_multiples(struct rect src, struct rect dest)
 {
if (dest.width  >= src.width  && dest.width  % src.width  == 0 &&
dest.height >= src.height && dest.height % src.height == 0)
@@ -959,6 +959,38 @@ static bool are_rect_integer_multiples(struct rect src, 
struct rect dest)
 
return false;
 }
+
+void calculate_integer_scaling(struct pipe_ctx *pipe_ctx)
+{
+   if (!pipe_ctx->plane_state->scaling_quality.integer_scaling)
+   return;
+
+   //for Centered Mode
+   if (pipe_ctx->stream->dst.width  == pipe_ctx->stream->src.width &&
+   pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) {
+   // calculate maximum # of replication of src onto addressable
+   unsigned int integer_multiple = min(
+   pipe_ctx->stream->timing.h_addressable / 
pipe_ctx->stream->src.width,
+   pipe_ctx->stream->timing.v_addressable  / 
pipe_ctx->stream->src.height);
+
+   //scale dst
+   pipe_ctx->stream->dst.width  = integer_multiple * 
pipe_ctx->stream->src.width;
+   pipe_ctx->stream->dst.height = integer_multiple * 
pipe_ctx->stream->src.height;
+
+   //center dst onto addressable
+   pipe_ctx->stream->dst.x = 
(pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2;
+   pipe_ctx->stream->dst.y = 
(pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2;
+   }
+
+   //disable taps if src & dst are integer ratio
+   if (are_rects_integer_multiples(pipe_ctx->stream->src, 
pipe_ctx->stream->dst)) {
+   pipe_ctx->plane_state->scaling_quality.v_taps = 1;
+   pipe_ctx->plane_state->scaling_quality.h_taps = 1;
+   pipe_ctx->plane_state->scaling_quality.v_taps_c = 1;
+   pipe_ctx->plane_state->scaling_quality.h_taps_c = 1;
+   }
+}
+
 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 {
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -972,6 +1004,8 @@ bool resource_build_scaling_params(struct pipe_ctx 
*pipe_ctx)
pipe_ctx->plane_res.scl_data.format = 
convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
 
+   calculate_integer_scaling(pipe_ctx);
+
calculate_scaling_ratios(pipe_ctx);
 
calculate_viewport(pipe_ctx);
@@ -1002,13 +1036,6 @@ bool resource_build_scaling_params(struct pipe_ctx 
*pipe_ctx)
res = 
pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp, 
_ctx->plane_res.scl_data, _state->scaling_quality);
 
-   if (res &&
-   plane_state->scaling_quality.integer_scaling &&
-   are_rect_integer_multiples(pipe_ctx->plane_res.scl_data.viewport,
-  pipe_ctx->plane_res.scl_data.recout)) {
-   pipe_ctx->plane_res.scl_data.taps.v_taps = 1;
-   pipe_ctx->plane_res.scl_data.taps.h_taps = 1;
-   }
 
if (!res) {
/* Try 24 bpp linebuffer */
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 30/37] drm/amd/display: Make clk mgr the only dto update point

2019-10-17 Thread sunpeng.li
From: Noah Abradjian 

[Why]

* Clk Mgr DTO update point did not cover all needed updates, as it included a
  check for plane_state which does not exist yet when the updater is called on
  driver startup
* This resulted in another update path in the pipe programming sequence, based
  on a dppclk update flag
* However, this alternate path allowed for stray DTO updates, some of which
  would occur in the wrong order during dppclk lowering and cause underflow
  (see the ordering sketch after the [How] list)

[How]

* Remove plane_state check and use of plane_res.dpp->inst, getting rid
  of sequence dependencies (this results in extra dto programming for unused
  pipes but that doesn't cause issues and is a small cost)
* Allow DTOs to be updated even if global clock is equal, to account for
  edge case exposed by diags tests
* Remove update_dpp_dto call in pipe programming sequence (leave update to
  dppclk_control there, as that update is necessary and shouldn't occur in clk
  mgr)
* Remove call to optimize_bandwidth when committing state, as it is not needed
  and resulted in sporadic underflows even with other fixes in place
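
A sketch of the ordering rule the clk mgr update path enforces
(hypothetical helper names; the real calls are
dcn20_update_clocks_update_dpp_dto and dcn20_update_clocks_update_dentist):

if (dpp_clock_lowered) {
	/* clock going down: raise DTO dividers first so no pipe briefly
	 * requests more bandwidth than the new clock can supply */
	update_dpp_dtos(clk_mgr, context);
	update_dentist_refclk(clk_mgr);
} else if (update_dppclk || update_dispclk) {
	/* clock going up: raise refclk first, then update the DTOs */
	update_dentist_refclk(clk_mgr);
	update_dpp_dtos(clk_mgr, context);
}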

Signed-off-by: Noah Abradjian 
Reviewed-by: Jun Lei 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c   | 14 +-
 .../drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c  |  3 ++-
 drivers/gpu/drm/amd/display/dc/core/dc.c   |  4 
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c |  8 +---
 4 files changed, 12 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 607d8afc56ec..25d7b7c6681c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -108,11 +108,12 @@ void dcn20_update_clocks_update_dpp_dto(struct 
clk_mgr_internal *clk_mgr,
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
int dpp_inst, dppclk_khz;
 
-   if (!context->res_ctx.pipe_ctx[i].plane_state)
-   continue;
-
-   dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+   /* Loop index will match dpp->inst if resource exists,
+* and we want to avoid dependency on dpp object
+*/
+   dpp_inst = i;
dppclk_khz = 
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
}
@@ -235,6 +236,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 
update_dispclk = true;
}
+
if (dc->config.forced_clocks == false || (force_reset && 
safe_to_lower)) {
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before 
lowering refclk
@@ -244,10 +246,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
// if clock is being raised, increase refclk before 
lowering DTO
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr);
-   if (update_dppclk)
+   // always update dtos unless clock is lowered and not 
safe to lower
+   if (new_clocks->dppclk_khz >= 
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, 
context);
}
}
+
if (update_dispclk &&
dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
/*update dmcu for wait_loop count*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index f64d221ad6f1..790a2d211bd6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -171,7 +171,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
// if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
rn_vbios_smu_set_dppclk(clk_mgr, 
clk_mgr_base->clks.dppclk_khz);
-   if (update_dppclk)
+   // always update dtos unless clock is lowered and not safe to 
lower
+   if (new_clocks->dppclk_khz >= 
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5e487bb82861..0a443348df10 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1241,10 +1241,6 @@ static enum dc_status dc_commit_state_no_check(struct dc 
*dc, struct dc_state *c
 
   

[PATCH 28/37] drm/amd/display: move wm ranges reporting to end of init hw

2019-10-17 Thread sunpeng.li
From: Eric Yang 

[Why]
SMU does not keep the wm table across S3/S4, so the table needs to be
re-sent. Also defer sending the table to after DCN has initialized.

[How]
Send the table at the end of init hw (see the sketch below).
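
The dcn10_hw_sequencer part of this change (a 4-line hunk per the
diffstat) presumably reduces to invoking the new notify hook once the
hardware is up — a hedged sketch, not the patch's exact hunk:

/* at the end of dcn10_init_hw() */
if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
	dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);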

Signed-off-by: Eric Yang 
Reviewed-by: Yongqiang Sun 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 149 +-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |   4 +
 .../gpu/drm/amd/display/dc/inc/hw/clk_mgr.h   |   1 +
 3 files changed, 81 insertions(+), 73 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index e8b8ee4f1b1e..f64d221ad6f1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -396,12 +396,87 @@ void rn_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
 }
 
+void build_watermark_ranges(struct clk_bw_params *bw_params, struct 
pp_smu_wm_range_sets *ranges)
+{
+   int i, num_valid_sets;
+
+   num_valid_sets = 0;
+
+   for (i = 0; i < WM_SET_COUNT; i++) {
+   /* skip empty entries, the smu array has no holes*/
+   if (!bw_params->wm_table.entries[i].valid)
+   continue;
+
+   ranges->reader_wm_sets[num_valid_sets].wm_inst = 
bw_params->wm_table.entries[i].wm_inst;
+   ranges->reader_wm_sets[num_valid_sets].wm_type = 
bw_params->wm_table.entries[i].wm_type;;
+   /* We will not select WM based on dcfclk, so leave it as 
unconstrained */
+   ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+   /* fclk wil be used to select WM*/
+
+   if (ranges->reader_wm_sets[num_valid_sets].wm_type == 
WM_TYPE_PSTATE_CHG) {
+   if (i == 0)
+   
ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
+   else {
+   /* add 1 to make it non-overlapping with next 
lvl */
+   
ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 
bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
+   }
+   ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz 
= bw_params->clk_table.entries[i].fclk_mhz;
+
+   } else {
+   /* unconstrained for memory retraining */
+   ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz 
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz 
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+   /* Modify previous watermark range to cover up to max */
+   ranges->reader_wm_sets[num_valid_sets - 
1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+   }
+   num_valid_sets++;
+   }
+
+   ASSERT(num_valid_sets != 0); /* Must have at least one set of valid 
watermarks */
+   ranges->num_reader_wm_sets = num_valid_sets;
+
+   /* modify the min and max to make sure we cover the whole range*/
+   ranges->reader_wm_sets[0].min_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->reader_wm_sets[0].min_fill_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->reader_wm_sets[ranges->num_reader_wm_sets - 
1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+   ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz 
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+   /* This is for writeback only, does not matter currently as no 
writeback support*/
+   ranges->num_writer_wm_sets = 1;
+   ranges->writer_wm_sets[0].wm_inst = WM_A;
+   ranges->writer_wm_sets[0].min_fill_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->writer_wm_sets[0].max_fill_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+   ranges->writer_wm_sets[0].min_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->writer_wm_sets[0].max_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+}
+
+static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+   struct dc_debug_options *debug = _mgr_base->ctx->dc->debug;
+   struct pp_smu_wm_range_sets ranges = {0};
+   struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+   struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
+
+   if (!debug->disable_pplib_wm_range) {
+   build_watermark_ranges(clk_mgr_base->bw_params, );
+
+   /* Notify PP Lib/SMU which Watermarks to use for which clock 
ranges */
+   if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
+

[PATCH 25/37] drm/amd/display: Disable force_single_disp_pipe_split on DCN2+

2019-10-17 Thread sunpeng.li
From: Michael Strauss 

[WHY]
force_single_disp_pipe_split is a debug flag for use on DCN1
but isn't necessary otherwise as DCN2+ splits by default

Signed-off-by: Michael Strauss 
Reviewed-by: Tony Cheng 
Acked-by: Charlene Liu 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 88a938633d11..a1b2db8f687a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -863,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
-   .force_single_disp_pipe_split = true,
+   .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index e07f03368c97..f165f7e58da9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -836,7 +836,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
-   .force_single_disp_pipe_split = true,
+   .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 24/37] drm/amd/display: Passive DP->HDMI dongle detection fix

2019-10-17 Thread sunpeng.li
From: Michael Strauss 

[WHY]
i2c_read is called to differentiate passive DP->HDMI and DP->DVI-D dongles.
The call is expected to fail in the DVI-D case but pass in the HDMI case.
Some HDMI dongles have a chance to fail as well, causing misdetection as DVI-D.

[HOW]
Retry i2c_read to ensure a failed result is valid (see the design note below).
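
As a design note, the inline retry below could equally be factored into
a small helper (a sketch with a hypothetical name, reusing the file's
existing i2c_read()):

static bool i2c_read_with_retry(struct ddc_service *ddc, uint32_t address,
		uint8_t *buffer, uint32_t len, int retries)
{
	while (retries-- > 0)
		if (i2c_read(ddc, address, buffer, len))
			return true;	/* a pass means HDMI-capable dongle */
	return false;			/* consistent failure: treat as DVI-D */
}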

Signed-off-by: Michael Strauss 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/core/dc_link_ddc.c | 24 ++-
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 9a56f110bbd1..7f904d55c1bc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -374,6 +374,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
enum display_dongle_type *dongle = _cap->dongle_type;
uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
bool is_type2_dongle = false;
+   int retry_count = 2;
struct dp_hdmi_dongle_signature_data *dongle_signature;
 
/* Assume we have no valid DP passive dongle connected */
@@ -386,13 +387,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
DP_HDMI_DONGLE_ADDRESS,
type2_dongle_buf,
sizeof(type2_dongle_buf))) {
-   *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
-   sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+   /* Passive HDMI dongles can sometimes fail here without 
retrying*/
+   while (retry_count > 0) {
+   if (i2c_read(ddc,
+   DP_HDMI_DONGLE_ADDRESS,
+   type2_dongle_buf,
+   sizeof(type2_dongle_buf)))
+   break;
+   retry_count--;
+   }
+   if (retry_count == 0) {
+   *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+   sink_cap->max_hdmi_pixel_clock = 
DP_ADAPTOR_DVI_MAX_TMDS_CLK;
 
-   CONN_DATA_DETECT(ddc->link, type2_dongle_buf, 
sizeof(type2_dongle_buf),
-   "DP-DVI passive dongle %dMhz: ",
-   DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
-   return;
+   CONN_DATA_DETECT(ddc->link, type2_dongle_buf, 
sizeof(type2_dongle_buf),
+   "DP-DVI passive dongle %dMhz: ",
+   DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+   return;
+   }
}
 
/* Check if Type 2 dongle.*/
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 22/37] drm/amd/display: add flag to allow diag to force enumerate edp

2019-10-17 Thread sunpeng.li
From: Jun Lei 

[why]
SLT tests require that diag can drive eDP even if nothing is connected.
This is not a typical production use case, so we need to add a flag.

[how]
Add the flag; it supersedes the "should destroy" logic.

Signed-off-by: Jun Lei 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
 drivers/gpu/drm/amd/display/dc/dc.h  | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 55b82ca44c3b..5e487bb82861 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -194,7 +194,7 @@ static bool create_links(
}
}
 
-   if (!should_destory_link) {
+   if (dc->config.force_enum_edp || !should_destory_link) {
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index a86dad3808b6..b578b2148e45 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -220,6 +220,7 @@ struct dc_config {
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
bool edp_not_connected;
+   bool force_enum_edp;
bool forced_clocks;
bool disable_extended_timeout_support; // Used to disable extended 
timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 20/37] drm/amd/display: fix number of dcn21 dpm clock levels

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

These are specific to dcn21 and should not be increased for
reuse on other asics.

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Chris Park 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h 
b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index b01db61b6181..ef7df9ef6d7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -251,8 +251,8 @@ struct pp_smu_funcs_nv {
 
 #define PP_SMU_NUM_SOCCLK_DPM_LEVELS  8
 #define PP_SMU_NUM_DCFCLK_DPM_LEVELS  8
-#define PP_SMU_NUM_FCLK_DPM_LEVELS8
-#define PP_SMU_NUM_MEMCLK_DPM_LEVELS  8
+#define PP_SMU_NUM_FCLK_DPM_LEVELS4
+#define PP_SMU_NUM_MEMCLK_DPM_LEVELS  4
 
 struct dpm_clock {
   uint32_t  Freq;// In MHz
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 17/37] drm/amd/display: Update min dcfclk

2019-10-17 Thread sunpeng.li
From: Alvin Lee 

[Why]
NV12 has lower min dcfclk

[How]
Add update in update_bounding_box

Signed-off-by: Alvin Lee 
Reviewed-by: Jun Lei 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c| 12 
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 2596d4ac6263..25515c255a3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3084,10 +3084,14 @@ void dcn20_update_bounding_box(struct dc *dc, struct 
_vcs_dpi_soc_bounding_box_s
 
if (dc->bb_overrides.min_dcfclk_mhz > 0)
min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
-   else
-   // Accounting for SOC/DCF relationship, we can go as high as
-   // 506Mhz in Vmin.  We need to code 507 since SMU will round 
down to 506.
-   min_dcfclk = 507;
+   else {
+   if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+   min_dcfclk = 310;
+   else
+   // Accounting for SOC/DCF relationship, we can go as 
high as
+   // 506Mhz in Vmin.
+   min_dcfclk = 506;
+   }
 
for (i = 0; i < num_states; i++) {
int min_fclk_required_by_uclk;
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 23/37] drm/amd/display: map TRANSMITTER_UNIPHY_x to LINK_REGS_x

2019-10-17 Thread sunpeng.li
From: Yogesh Mohan Marimuthu 

[Why]
The enum value for TRANSMITTER_UNIPHY_G is 9 (the enum interleaves other
transmitter types between UNIPHY_F and UNIPHY_G), but in the resource
files' link_enc_regs[] structure the TRANSMITTER_UNIPHY_G registers are
initialized at index 6. Due to this mismatch, if a monitor is attached
to a port using TRANSMITTER_UNIPHY_G, the monitor blanks out.

[How]
Add a map_transmitter_id_to_phy_instance() function and use it to map
the transmitter enum to the link regs.
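
An equivalent table-based mapping (an alternative sketch, not what the
patch implements) would keep the enum-to-index relationship in one place:

static const int phy_instance[] = {
	[TRANSMITTER_UNIPHY_A] = 0,
	[TRANSMITTER_UNIPHY_B] = 1,
	[TRANSMITTER_UNIPHY_C] = 2,
	[TRANSMITTER_UNIPHY_D] = 3,
	[TRANSMITTER_UNIPHY_E] = 4,
	[TRANSMITTER_UNIPHY_F] = 5,
	[TRANSMITTER_UNIPHY_G] = 6,
};

The patch instead repeats a small switch per resource file, which keeps
each DCE/DCN variant self-contained.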

Signed-off-by: Yogesh Mohan Marimuthu 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 .../amd/display/dc/dce100/dce100_resource.c   | 37 ++-
 .../amd/display/dc/dce110/dce110_resource.c   | 37 ++-
 .../amd/display/dc/dce112/dce112_resource.c   | 37 ++-
 .../amd/display/dc/dce120/dce120_resource.c   | 37 ++-
 .../drm/amd/display/dc/dce80/dce80_resource.c | 37 ++-
 .../drm/amd/display/dc/dcn10/dcn10_resource.c | 28 +-
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 33 -
 .../drm/amd/display/dc/dcn21/dcn21_resource.c | 31 +++-
 8 files changed, 269 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index b5d6dff29c45..a5e122c721ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -399,6 +399,37 @@ static const struct dc_plane_cap plane_cap = {
 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
 #endif
 
+static int map_transmitter_id_to_phy_instance(
+   enum transmitter transmitter)
+{
+   switch (transmitter) {
+   case TRANSMITTER_UNIPHY_A:
+   return 0;
+   break;
+   case TRANSMITTER_UNIPHY_B:
+   return 1;
+   break;
+   case TRANSMITTER_UNIPHY_C:
+   return 2;
+   break;
+   case TRANSMITTER_UNIPHY_D:
+   return 3;
+   break;
+   case TRANSMITTER_UNIPHY_E:
+   return 4;
+   break;
+   case TRANSMITTER_UNIPHY_F:
+   return 5;
+   break;
+   case TRANSMITTER_UNIPHY_G:
+   return 6;
+   break;
+   default:
+   ASSERT(0);
+   return 0;
+   }
+}
+
 static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -579,14 +610,18 @@ struct link_encoder *dce100_link_encoder_create(
 {
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+   int link_regs_id;
 
if (!enc110)
return NULL;
 
+   link_regs_id =
+   map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
  enc_init_data,
  _enc_feature,
- 
_enc_regs[enc_init_data->transmitter],
+ _enc_regs[link_regs_id],
  _enc_aux_regs[enc_init_data->channel 
- 1],
  
_enc_hpd_regs[enc_init_data->hpd_source]);
return >base;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index c651a38e34a0..83a4dbf6d76e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -448,6 +448,37 @@ static const struct dc_plane_cap underlay_plane_cap = {
 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
 #endif
 
+static int map_transmitter_id_to_phy_instance(
+   enum transmitter transmitter)
+{
+   switch (transmitter) {
+   case TRANSMITTER_UNIPHY_A:
+   return 0;
+   break;
+   case TRANSMITTER_UNIPHY_B:
+   return 1;
+   break;
+   case TRANSMITTER_UNIPHY_C:
+   return 2;
+   break;
+   case TRANSMITTER_UNIPHY_D:
+   return 3;
+   break;
+   case TRANSMITTER_UNIPHY_E:
+   return 4;
+   break;
+   case TRANSMITTER_UNIPHY_F:
+   return 5;
+   break;
+   case TRANSMITTER_UNIPHY_G:
+   return 6;
+   break;
+   default:
+   ASSERT(0);
+   return 0;
+   }
+}
+
 static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -625,14 +656,18 @@ static struct link_encoder *dce110_link_encoder_create(
 {
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+   int link_regs_id;
 
if (!enc110)
return NULL;
 
+   link_regs_id =
+   map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
  

[PATCH 08/37] drm/amd/display: Add debugfs entry for reading psr state

2019-10-17 Thread sunpeng.li
From: Roman Li 

[Why]
For upcoming PSR support it's useful to have a debug entry
to verify the PSR state.

[How]
 - Enable psr dc api for Linux
 - Add psr_state file to eDP connector debugfs
usage e.g.: cat /sys/kernel/debug/dri/0/DP-1/psr_state

Signed-off-by: Roman Li 
Reviewed-by: Nicholas Kazlauskas 
Acked-by: Leo Li 
---
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |  21 +++
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 149 ++
 2 files changed, 170 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index e29c6314f98c..bdb37e611015 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -969,6 +969,25 @@ static int force_yuv420_output_get(void *data, u64 *val)
 DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
 force_yuv420_output_set, "%llu\n");
 
+/*
+ *  Read PSR state
+ */
+static int psr_get(void *data, u64 *val)
+{
+   struct amdgpu_dm_connector *connector = data;
+   struct dc_link *link = connector->dc_link;
+   uint32_t psr_state = 0;
+
+   dc_link_get_psr_state(link, _state);
+
+   *val = psr_state;
+
+   return 0;
+}
+
+
+DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
+
 void connector_debugfs_init(struct amdgpu_dm_connector *connector)
 {
int i;
@@ -982,6 +1001,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector 
*connector)
dp_debugfs_entries[i].fops);
}
}
+   if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+   debugfs_create_file_unsafe("psr_state", 0444, dir, connector, 
_fops);
 
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
   _yuv420_output_fops);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 935053664160..10a04565535c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2436,6 +2436,155 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, 
bool allow_active, bool
return true;
 }
 
+bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
+{
+   struct dc  *core_dc = link->ctx->dc;
+   struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+   if (dmcu != NULL && link->psr_feature_enabled)
+   dmcu->funcs->get_psr_state(dmcu, psr_state);
+
+   return true;
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+   const struct dc_stream_state *stream, struct psr_config 
*psr_config,
+   struct psr_context *psr_context)
+{
+   struct dc *core_dc;
+   struct dmcu *dmcu;
+   int i;
+   /* updateSinkPsrDpcdConfig*/
+   union dpcd_psr_configuration psr_configuration;
+
+   psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+   if (!link)
+   return false;
+
+   core_dc = link->ctx->dc;
+   dmcu = core_dc->res_pool->dmcu;
+
+   if (!dmcu)
+   return false;
+
+
+   memset(_configuration, 0, sizeof(psr_configuration));
+
+   psr_configuration.bits.ENABLE= 1;
+   psr_configuration.bits.CRC_VERIFICATION  = 1;
+   psr_configuration.bits.FRAME_CAPTURE_INDICATION  =
+   psr_config->psr_frame_capture_indication_req;
+
+   /* Check for PSR v2*/
+   if (psr_config->psr_version == 0x2) {
+   /* For PSR v2 selective update.
+* Indicates whether sink should start capturing
+* immediately following active scan line,
+* or starting with the 2nd active scan line.
+*/
+   psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+   /*For PSR v2, determines whether Sink should generate
+* IRQ_HPD when CRC mismatch is detected.
+*/
+   psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR= 1;
+   }
+
+   dm_helpers_dp_write_dpcd(
+   link->ctx,
+   link,
+   368,
+   _configuration.raw,
+   sizeof(psr_configuration.raw));
+
+   psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+   psr_context->transmitterId = link->link_enc->transmitter;
+   psr_context->engineId = link->link_enc->preferred_engine;
+
+   for (i = 0; i < MAX_PIPES; i++) {
+   if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+   == stream) {
+   /* dmcu -1 for all controller id values,
+* therefore +1 here
+*/
+   psr_context->controllerId =
+   

[PATCH 18/37] drm/amd/display: Allow inverted gamma

2019-10-17 Thread sunpeng.li
From: Aidan Yang 

[why]
There's a use case for inverted gamma,
and it's been confirmed that negative slopes are OK.

[how]
Remove the code blocking non-monotonically increasing gamma (see the
note below).
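
The hardware evaluates each region piecewise-linearly, roughly
out(x) = point[i] + (point[i+1] - point[i]) * frac(x), so a negative
delta simply produces a decreasing segment; duplicating the final point
(added below) keeps the last delta well-defined.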

Signed-off-by: Aidan Yang 
Reviewed-by: Krunoslav Kovac 
Acked-by: Leo Li 
Acked-by: Reza Amini 
---
 .../amd/display/dc/dcn10/dcn10_cm_common.c| 22 +++
 1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 01c7e30b9ce1..bbd6e01b3eca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -393,6 +393,10 @@ bool cm_helper_translate_curve_to_hw_format(
rgb_resulted[hw_points - 1].green = 
output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+   rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+   rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+   rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
// All 3 color channels have same x
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 dc_fixpt_from_int(region_start));
@@ -464,13 +468,6 @@ bool cm_helper_translate_curve_to_hw_format(
 
i = 1;
while (i != hw_points + 1) {
-   if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-   rgb_plus_1->red = rgb->red;
-   if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-   rgb_plus_1->green = rgb->green;
-   if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-   rgb_plus_1->blue = rgb->blue;
-
rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
@@ -562,6 +559,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
rgb_resulted[hw_points - 1].green = 
output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+   rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+   rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+   rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 dc_fixpt_from_int(region_start));
corner_points[0].green.x = corner_points[0].red.x;
@@ -624,13 +625,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
i = 1;
while (i != hw_points + 1) {
-   if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-   rgb_plus_1->red = rgb->red;
-   if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-   rgb_plus_1->green = rgb->green;
-   if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-   rgb_plus_1->blue = rgb->blue;
-
rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 03/37] drm/amd/display: Add unknown clk state.

2019-10-17 Thread sunpeng.li
From: Yongqiang Sun 

[Why]
The system hangs during S0i3 when only DP is connected, because the clock
is disabled while link training is being done.
During S0i3 the clock is disabled, but the clock state is only updated when
init_hw is called; at that moment the clock is still disabled, which leaves
a wrong state behind for the next attempt to enable the clock.

[How]
Add an unknown state and initialize it during init_hw, making sure the
clock-enable command is actually sent to the SMU.
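
A minimal sketch of why an explicit UNKNOWN boot state forces that first
SMU message (names taken from the diff below):

    /* boot: never equal to any real state, so the first compare fails */
    clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;

    /* first rn_update_clocks() after init_hw */
    if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
            /* always taken once, so the enable-clk message reaches the SMU */
            rn_vbios_smu_set_dcn_low_power_state(clk_mgr,
                            DCN_PWR_STATE_MISSION_MODE);
            clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
    }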

Signed-off-by: Yongqiang Sun 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c| 16 
 .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c  |  2 +-
 drivers/gpu/drm/amd/display/dc/dc.h  |  5 +++--
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index b647e0320e4b..6212b407cd01 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -114,22 +114,22 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 */
if (safe_to_lower) {
/* check that we're not already in lower */
-   if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_OPTIMIZED) {
+   if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
 
display_count = rn_get_active_display_cnt_wa(dc, 
context);
/* if we can go lower, go lower */
if (display_count == 0) {
-   rn_vbios_smu_set_dcn_low_power_state(clk_mgr, 
DCN_PWR_STATE_OPTIMIZED);
+   rn_vbios_smu_set_dcn_low_power_state(clk_mgr, 
DCN_PWR_STATE_LOW_POWER);
/* update power state */
-   clk_mgr_base->clks.pwr_state = 
DCN_PWR_STATE_OPTIMIZED;
+   clk_mgr_base->clks.pwr_state = 
DCN_PWR_STATE_LOW_POWER;
}
}
} else {
-   /* check that we're not already in the normal state */
-   if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_NORMAL) {
-   rn_vbios_smu_set_dcn_low_power_state(clk_mgr, 
DCN_PWR_STATE_NORMAL);
+   /* check that we're not already in D0 */
+   if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) 
{
+   rn_vbios_smu_set_dcn_low_power_state(clk_mgr, 
DCN_PWR_STATE_MISSION_MODE);
/* update power state */
-   clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_NORMAL;
+   clk_mgr_base->clks.pwr_state = 
DCN_PWR_STATE_MISSION_MODE;
}
}
 
@@ -393,7 +393,7 @@ void rn_init_clocks(struct clk_mgr *clk_mgr)
// Assumption is that boot state always supports pstate
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
-   clk_mgr->clks.pwr_state = DCN_PWR_STATE_NORMAL;
+   clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
 }
 
 static struct clk_mgr_funcs dcn21_funcs = {
diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 5647fcf10717..cb7c0e8b7e1b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -170,7 +170,7 @@ void rn_vbios_smu_set_dcn_low_power_state(struct 
clk_mgr_internal *clk_mgr, enum
 {
int disp_count;
 
-   if (state == DCN_PWR_STATE_OPTIMIZED)
+   if (state == DCN_PWR_STATE_LOW_POWER)
disp_count = 0;
else
disp_count = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index b7e7181bad78..2e1d34882684 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -257,8 +257,9 @@ enum dtm_pstate{
 };
 
 enum dcn_pwr_state {
-   DCN_PWR_STATE_OPTIMIZED = 0,
-   DCN_PWR_STATE_NORMAL = 1
+   DCN_PWR_STATE_UNKNOWN = -1,
+   DCN_PWR_STATE_MISSION_MODE = 0,
+   DCN_PWR_STATE_LOW_POWER = 3,
 };
 
 /*
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 10/37] drm/amd/display: correctly populate dpp refclk in fpga

2019-10-17 Thread sunpeng.li
From: Anthony Koo 

[Why]
In the diags environment we are not programming the DPP DTO
correctly.

[How]
Populate the dpp refclk in dccg so it can be used to correctly
program DPP DTO.
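
For context, the DPP DTO divides ref_dppclk down to the per-pipe dppclk, so
a stale ref_dppclk produces a wrong divider. A rough sketch of the
relationship (the field names are illustrative, not the exact registers):

    /* dppclk_out = ref_dppclk * phase / modulo */
    uint32_t modulo = 0xff;
    uint32_t phase = req_dppclk_khz * modulo / ref_dppclk_khz;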

Signed-off-by: Anthony Koo 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c   | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index ecd2cb4840e3..69daddbfbf29 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -260,6 +260,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower)
 {
+   struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
/* Min fclk = 1.2GHz since all the extra scemi logic seems to run off 
of it */
int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 
1200000;
@@ -297,14 +299,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
}
 
-   /* Both fclk and dppclk ref are run on the same scemi clock so we
-* need to keep the same value for both
+   /* Both fclk and ref_dppclk run on the same scemi clock.
+* So take the higher value since the DPP DTO is typically programmed
+* such that max dppclk is 1:1 with ref_dppclk.
 */
if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
 
+   // Both fclk and ref_dppclk run on the same scemi clock.
+   clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
+
	dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
 }
 
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 04/37] drm/amd/display: Don't use optimized gamma22 with eetf

2019-10-17 Thread sunpeng.li
From: Aidan Yang 

[why]
The optimized gamma22 path assumes a fixed point distribution, which does
not hold when EETF is in use.

[how]
Use the long-form calculation for EETF.
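
With all coefficients zero and gamma = 2.2 (as set up in calculate_gamma22
below), the long path reduces to a plain power function; a minimal usage
sketch (the input value is illustrative):

    struct fixed31_32 x = dc_fixpt_from_fraction(1, 2); /* 0.5 in linear */
    struct fixed31_32 y = calculate_gamma22(x, true);   /* 0.5^(1/2.2) ~= 0.73 */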

Signed-off-by: Aidan Yang 
Reviewed-by: Krunoslav Kovac 
Acked-by: Leo Li 
Acked-by: Reza Amini 
---
 .../amd/display/modules/color/color_gamma.c   | 45 +--
 1 file changed, 41 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 2d8f14b69117..85dad356c9d5 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -373,7 +373,42 @@ static struct fixed31_32 translate_from_linear_space(
return dc_fixpt_mul(args->arg, args->a1);
 }
 
-static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+
+static struct fixed31_32 translate_from_linear_space_long(
+   struct translate_from_linear_space_args *args)
+{
+   const struct fixed31_32 one = dc_fixpt_from_int(1);
+
+   if (dc_fixpt_lt(one, args->arg))
+   return one;
+
+   if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0)))
+   return dc_fixpt_sub(
+   args->a2,
+   dc_fixpt_mul(
+   dc_fixpt_add(
+   one,
+   args->a3),
+   dc_fixpt_pow(
+   dc_fixpt_neg(args->arg),
+   dc_fixpt_recip(args->gamma;
+   else if (dc_fixpt_le(args->a0, args->arg))
+   return dc_fixpt_sub(
+   dc_fixpt_mul(
+   dc_fixpt_add(
+   one,
+   args->a3),
+   dc_fixpt_pow(
+   args->arg,
+   dc_fixpt_recip(args->gamma))),
+   args->a2);
+   else
+   return dc_fixpt_mul(
+   args->arg,
+   args->a1);
+}
+
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool 
use_eetf)
 {
struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
 
@@ -384,9 +419,13 @@ static struct fixed31_32 calculate_gamma22(struct 
fixed31_32 arg)
scratch_gamma_args.a3 = dc_fixpt_zero;
scratch_gamma_args.gamma = gamma;
 
+   if (use_eetf)
+   return translate_from_linear_space_long(_gamma_args);
+
return translate_from_linear_space(_gamma_args);
 }
 
+
 static struct fixed31_32 translate_to_linear_space(
struct fixed31_32 arg,
struct fixed31_32 a0,
@@ -950,7 +989,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex 
*rgb_regamma,
if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
output = dc_fixpt_zero;
else
-   output = calculate_gamma22(scaledX);
+   output = calculate_gamma22(scaledX, 
use_eetf);
 
rgb->r = output;
rgb->g = output;
@@ -2173,5 +2212,3 @@ bool  mod_color_calculate_degamma_curve(enum 
dc_transfer_func_predefined trans,
 rgb_degamma_alloc_fail:
return ret;
 }
-
-
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 13/37] drm/amd/display: move dispclk vco freq to clk mgr base

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

This value will be needed by dml and therefore should be externally
accessible.
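
A minimal before/after access sketch (TO_CLK_MGR_INTERNAL is the existing
cast helper; "clk_mgr" here is a struct clk_mgr pointer):

    int khz;

    /* before: only reachable through the internal struct */
    khz = TO_CLK_MGR_INTERNAL(clk_mgr)->dentist_vco_freq_khz;

    /* after: DML can read it straight from the shared base */
    khz = clk_mgr->dentist_vco_freq_khz;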

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Nevenko Stupar 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c| 14 +++---
 .../amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c |  4 ++--
 .../drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c | 10 +-
 .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c   | 14 +++---
 .../drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c  | 12 ++--
 .../drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h  |  7 ---
 .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c  | 12 ++--
 .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.h  |  6 ++
 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h|  2 +-
 .../drm/amd/display/dc/inc/hw/clk_mgr_internal.h   |  2 --
 10 files changed, 40 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index c5c8c4901eed..26db1c5d4e4d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -147,7 +147,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
 
/* Calculate the current DFS clock, in kHz.*/
dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
-   * clk_mgr->dentist_vco_freq_khz) / target_div;
+   * clk_mgr->base.dentist_vco_freq_khz) / target_div;
 
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
 }
@@ -239,7 +239,7 @@ int dce_set_clock(
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
-   clk_mgr_dce->dentist_vco_freq_khz / 64);
+   clk_mgr_dce->base.dentist_vco_freq_khz / 64);
 
/* Prepare to program display clock*/
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
@@ -276,11 +276,11 @@ static void dce_clock_read_integrated_info(struct 
clk_mgr_internal *clk_mgr_dce)
int i;
 
if (bp->integrated_info)
-   clk_mgr_dce->dentist_vco_freq_khz = 
bp->integrated_info->dentist_vco_freq;
-   if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
-   clk_mgr_dce->dentist_vco_freq_khz = 
bp->fw_info.smu_gpu_pll_output_freq;
-   if (clk_mgr_dce->dentist_vco_freq_khz == 0)
-   clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+   clk_mgr_dce->base.dentist_vco_freq_khz = 
bp->integrated_info->dentist_vco_freq;
+   if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) {
+   clk_mgr_dce->base.dentist_vco_freq_khz = 
bp->fw_info.smu_gpu_pll_output_freq;
+   if (clk_mgr_dce->base.dentist_vco_freq_khz == 0)
+   clk_mgr_dce->base.dentist_vco_freq_khz = 3600000;
}
 
/*update the maximum display clock for each power state*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index 7c746ef1e32e..a6c46e903ff9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -81,7 +81,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int 
requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
-   clk_mgr_dce->dentist_vco_freq_khz / 62);
+   clk_mgr_dce->base.dentist_vco_freq_khz / 62);
 
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
@@ -135,7 +135,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, 
int requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
-   clk_mgr->dentist_vco_freq_khz / 62);
+   clk_mgr->base.dentist_vco_freq_khz / 62);
 
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 5b3d36d41822..3fab9296918a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -269,11 +269,11 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct 
clk_mgr_internal *clk_
	clk_mgr->base.dprefclk_khz = 600000;
 
if (bp->integrated_info)
-   

[PATCH 05/37] drm/amd/display: Remove superfluous assert

2019-10-17 Thread sunpeng.li
From: Jordan Lazare 

[Why]
The for loop below the assert already checks the number of instances to
create, so the ASSERT is meaningless and just causes log spam.

[How]
Remove the assert.

Signed-off-by: Jordan Lazare 
Reviewed-by: Harry Wentland 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index ee9157b673ab..c9792c47978a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2913,8 +2913,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct 
resource_pool *pool)
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
 
-   ASSERT(pipe_count > 0);
-
for (i = 0; i < pipe_count; i++) {
struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
GFP_KERNEL);
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 19/37] drm/amd/display: enable vm by default for rn.

2019-10-17 Thread sunpeng.li
From: Yongqiang Sun 

[Why & How]
VM should be enabled by default on Renoir so that
DML gets the right parameters.

Signed-off-by: Yongqiang Sun 
Reviewed-by: Dmytro Laktyushkin 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/dcn21/dcn21_resource.c | 29 ---
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 5e3b48bb04f1..a4d9ed9f2623 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -83,8 +83,8 @@
 
 struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.odm_capable = 1,
-   .gpuvm_enable = 0,
-   .hostvm_enable = 0,
+   .gpuvm_enable = 1,
+   .hostvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 2,
@@ -669,6 +669,9 @@ static const struct dcn10_stream_encoder_mask se_mask = {
 
 static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
 
+static int dcn21_populate_dml_pipes_from_context(
+   struct dc *dc, struct resource_context *res_ctx, 
display_e2e_pipe_params_st *pipes);
+
 static struct input_pixel_processor *dcn21_ipp_create(
struct dc_context *ctx, uint32_t inst)
 {
@@ -1083,7 +1086,7 @@ void dcn21_calculate_wm(
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
&context->res_ctx, pipes);
else
-   pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
+   pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
&context->res_ctx, pipes);
}
 
@@ -1585,11 +1588,29 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
return value;
 }
 
+static int dcn21_populate_dml_pipes_from_context(
+   struct dc *dc, struct resource_context *res_ctx, 
display_e2e_pipe_params_st *pipes)
+{
+   uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, 
pipes);
+   int i;
+
+   for (i = 0; i < dc->res_pool->pipe_count; i++) {
+
+   if (!res_ctx->pipe_ctx[i].stream)
+   continue;
+
+   pipes[i].pipe.src.hostvm = 1;
+   pipes[i].pipe.src.gpuvm = 1;
+   }
+
+   return pipe_cnt;
+}
+
 static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
.validate_bandwidth = dcn21_validate_bandwidth,
-   .populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
+   .populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 14/37] drm/amd/display: remove unnecessary assert

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Chris Park 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index db3fb57bf244..9bc0ffad7093 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1633,7 +1633,6 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->stream_res.dsc = NULL;
 #endif
if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != 
next_odm_pipe) {
-   ASSERT(!next_odm_pipe->next_odm_pipe);
next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
}
@@ -2398,8 +2397,8 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = true;

context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
}
-   context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]
-   = 
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+   context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
+   
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
/* Adjust dppclk when split is forced, do not bother with 
dispclk */
if (split[i] && 
context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]
 == 1)

context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]
 /= 2;
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 01/37] drm/amd/display: add 50us buffer as WA for pstate switch in active

2019-10-17 Thread sunpeng.li
From: Jun Lei 

Signed-off-by: Jun Lei 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 649883777f62..6c6c486b774a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -2577,7 +2577,8 @@ static void 
dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
 
-   if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+   if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+   mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = 
dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || 
mode_lib->vba.NumberOfActivePlanes == 1) {
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 02/37] drm/amd/display: add odm visual confirm

2019-10-17 Thread sunpeng.li
From: Jun Lei 

[why]
Hard to determine if pipe combine is done with MPC or ODM

[how]
Add a new visual confirm type; it marks each MPCC tree
with a different color
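
A minimal usage sketch (via the existing debug option; the enum value comes
from the diff below). Pipes blended through MPC walk up to a common tree
root and therefore share one color, while ODM combine shows a different
color per tree:

    dc->debug.visual_confirm = VISUAL_CONFIRM_MPCTREE;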

Signed-off-by: Jun Lei 
Reviewed-by: Yongqiang Sun 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc.h   |  1 +
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c| 25 +++
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.h|  4 ++-
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 5967106826ca..b7e7181bad78 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -229,6 +229,7 @@ enum visual_confirm {
VISUAL_CONFIRM_DISABLE = 0,
VISUAL_CONFIRM_SURFACE = 1,
VISUAL_CONFIRM_HDR = 2,
+   VISUAL_CONFIRM_MPCTREE = 4,
 };
 
 enum dcc_option {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 6229a8ca0013..e237ec39d193 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1996,6 +1996,28 @@ static void dcn20_reset_hw_ctx_wrap(
}
 }
 
+void dcn20_get_mpctree_visual_confirm_color(
+   struct pipe_ctx *pipe_ctx,
+   struct tg_color *color)
+{
+   const struct tg_color pipe_colors[6] = {
+   {MAX_TG_COLOR_VALUE, 0, 0}, // red
+   {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow
+   {0, MAX_TG_COLOR_VALUE, 0}, // blue
+   {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // 
purple
+   {0, 0, MAX_TG_COLOR_VALUE}, // green
+   {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // 
orange
+   };
+
+   struct pipe_ctx *top_pipe = pipe_ctx;
+
+   while (top_pipe->top_pipe) {
+   top_pipe = top_pipe->top_pipe;
+   }
+
+   *color = pipe_colors[top_pipe->pipe_idx];
+}
+
 static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -2013,6 +2035,9 @@ static void dcn20_update_mpcc(struct dc *dc, struct 
pipe_ctx *pipe_ctx)
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
dcn10_get_surface_visual_confirm_color(
pipe_ctx, _cfg.black_color);
+   } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
+   dcn20_get_mpctree_visual_confirm_color(
+   pipe_ctx, _cfg.black_color);
}
 
if (per_pixel_alpha)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 9dbc2effa4ea..3098f1049ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -109,5 +109,7 @@ bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
 bool dcn20_set_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
-
+void dcn20_get_mpctree_visual_confirm_color(
+   struct pipe_ctx *pipe_ctx,
+   struct tg_color *color);
 #endif /* __DC_HWSS_DCN20_H__ */
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 00/37] DC Patches 17 Oct 2019

2019-10-17 Thread sunpeng.li
From: Leo Li 

This series has dependencies on the recent Renoir series:
https://patchwork.freedesktop.org/series/67803/#rev2

Summary of changes:
* Enable PSR on supported eDP panels
* Allow programming of negative gamma slope


Aidan Yang (2):
  drm/amd/display: Don't use optimized gamma22 with eetf
  drm/amd/display: Allow inverted gamma

Alvin Lee (1):
  drm/amd/display: Update min dcfclk

Anthony Koo (2):
  drm/amd/display: correctly populate dpp refclk in fpga
  drm/amd/display: Proper return of result when aux engine acquire fails

Aric Cyr (2):
  drm/amd/display: 3.2.55
  drm/amd/display: 3.2.56

Dmytro Laktyushkin (8):
  drm/amd/display: remove unused code
  drm/amd/display: split dcn20 fast validate into more functions
  drm/amd/display: correctly initialize dml odm variables
  drm/amd/display: move dispclk vco freq to clk mgr base
  drm/amd/display: remove unnecessary assert
  drm/amd/display: fix number of dcn21 dpm clock levels
  drm/amd/display: add embedded flag to dml
  drm/amd/display: fix avoid_split for dcn2+ validation

Eric Yang (2):
  drm/amd/display: move wm ranges reporting to end of init hw
  drm/amd/display: fix hubbub deadline programing

Jordan Lazare (1):
  drm/amd/display: Remove superfluous assert

Joshua Aberback (1):
  drm/amd/display: Apply vactive dram clock change workaround to dcn2
DMLv2

Jun Lei (4):
  drm/amd/display: add 50us buffer as WA for pstate switch in active
  drm/amd/display: add odm visual confirm
  drm/amd/display: add flag to allow diag to force enumerate edp
  drm/amd/display: do not synchronize "drr" displays

Krunoslav Kovac (1):
  drm/amd/display: Only use EETF when maxCL > max display

Lewis Huang (1):
  drm/amd/display: take signal type from link

Michael Strauss (3):
  drm/amd/display: Fix MPO & pipe split on 3-pipe dcn2x
  drm/amd/display: Passive DP->HDMI dongle detection fix
  drm/amd/display: Disable force_single_disp_pipe_split on DCN2+

Noah Abradjian (1):
  drm/amd/display: Make clk mgr the only dto update point

Paul Hsieh (1):
  drm/amd/display: audio endpoint cannot switch

Reza Amini (1):
  drm/amd/display: Add center mode for integer scaling in DC

Roman Li (2):
  drm/amd/display: Add debugfs entry for reading psr state
  drm/amd/display: Enable PSR

Sung Lee (1):
  drm/amd/display: Do not call update bounding box on dc create

Yogesh Mohan Marimuthu (1):
  drm/amd/display: map TRANSMITTER_UNIPHY_x to LINK_REGS_x

Yongqiang Sun (2):
  drm/amd/display: Add unknown clk state.
  drm/amd/display: enable vm by default for rn.

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 133 -
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |  21 ++
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   |   1 +
 .../display/dc/clk_mgr/dce100/dce_clk_mgr.c   |  14 +-
 .../dc/clk_mgr/dce112/dce112_clk_mgr.c|   4 +-
 .../display/dc/clk_mgr/dcn10/rv1_clk_mgr.c|  10 +-
 .../display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c  |  38 ++-
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 178 ++--
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h |   7 -
 .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c   |   2 +-
 drivers/gpu/drm/amd/display/dc/core/dc.c  |   8 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 149 ++
 .../gpu/drm/amd/display/dc/core/dc_link_ddc.c |  24 +-
 .../gpu/drm/amd/display/dc/core/dc_resource.c |  54 +++-
 drivers/gpu/drm/amd/display/dc/dc.h   |   9 +-
 drivers/gpu/drm/amd/display/dc/dc_ddc_types.h |   3 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c  |   5 +-
 .../amd/display/dc/dce100/dce100_resource.c   |  37 ++-
 .../display/dc/dce110/dce110_hw_sequencer.c   |  11 +-
 .../amd/display/dc/dce110/dce110_resource.c   |  37 ++-
 .../amd/display/dc/dce112/dce112_resource.c   |  37 ++-
 .../amd/display/dc/dce120/dce120_resource.c   |  37 ++-
 .../drm/amd/display/dc/dce80/dce80_resource.c |  37 ++-
 .../amd/display/dc/dcn10/dcn10_cm_common.c|  22 +-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |   4 +
 .../drm/amd/display/dc/dcn10/dcn10_resource.c |  28 +-
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c|  33 ++-
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.h|   4 +-
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 260 +++---
 .../drm/amd/display/dc/dcn20/dcn20_resource.h |  31 +++
 .../drm/amd/display/dc/dcn21/dcn21_resource.c |  78 +-
 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h|   4 +-
 .../dc/dml/dcn20/display_mode_vba_20.c|   3 +-
 .../dc/dml/dcn20/display_mode_vba_20v2.c  |   3 +-
 .../amd/display/dc/dml/display_mode_structs.h |   1 +
 .../drm/amd/display/dc/dml/display_mode_vba.c |   3 +
 .../drm/amd/display/dc/dml/display_mode_vba.h |   1 +
 .../gpu/drm/amd/display/dc/inc/core_types.h   |   4 -
 .../gpu/drm/amd/display/dc/inc/hw/clk_mgr.h   |   3 +-
 .../amd/display/dc/inc/hw/clk_mgr_internal.h  |   2 -
 .../amd/display/modules/color/color_gamma.c   |  51 +++-
 41 files changed, 1074 insertions(+), 317 deletions(-)

-- 
2.23.0


Re: Spontaneous reboots when using RX 560

2019-10-17 Thread Sylvain Munaut
>  From the hardware point of view the only thing which comes to mind is
> that you somehow triggered the ESD protection.
>
> I assume you can rule out an unstable physical connection (because it
> works on windows), so the only thing left is that there is something
> very very badly going wrong with power management.
>
> Have you "tuned" the power tables on the board somehow?

Nope, not at all.

On Windows, I noticed that before I installed the
ASRock utility for the card, it stayed at its lowest clock.
I had the Radeon / AMD drivers installed of course, but not the vendor
tools for the board. Once I installed those, it started automatically
going to higher power states as the load varied. And it's set to the
"default" profile.

On linux I haven't done anything. Just a fresh Ubuntu 19.10 install
with amdgpu loaded. Not sure if I have anything else to do. I'm not
even sure how to monitor the card frequency / voltage on linux.


> Or maybe multiple GPUs connected to the same power supply?

That machine has another GPU, a NVidia one in the first x16 slot. The
Nvidia GPU has a PCIe power connector going to it.
The RX 560 board (
https://www.asrock.com/Graphics-Card/AMD/Phantom%20Gaming%20Radeon%20RX560%202G/
) doesn't have any additional PCIe power input, so it gets all its
power from the PCIe slot itself.

The PC has a good-quality 650W Corsair power supply, and during all
these tests the NVidia GPU was idle (not even an X server launched on
it), and the PSU fan didn't even spin up (it doesn't spin
if power is < 350 W), so I think it has plenty of margin.


Cheers,

Sylvain
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH v2] drm/amd/display: Modify display link stream setup sequence.

2019-10-17 Thread Liu, Zhan
[Why]
This patch fixes a Navi14 pink screen issue. With it,
stream->link->link_enc->funcs->setup will be called
twice, which makes sure GC_SEND is set to 1. We still
need to look into why the issue only happens on
Linux and not on Windows.

[How]
Call stream->link->link_enc->funcs->setup twice.

Signed-off-by: Zhan liu 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 935053664160..8683e8613ec2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2842,6 +2842,12 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);

+   /* Hack on Navi14: fixes Navi14 HDMI pink screen issue */
+   if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+   stream->link->link_enc->funcs->setup(
+   stream->link->link_enc,
+   pipe_ctx->stream->signal);
+
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
--
2.17.0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amdgpu: fix compiler warnings for df perfmons

2019-10-17 Thread Yang, Philip
Reviewed-by: Philip Yang 

On 2019-10-17 1:56 p.m., Kim, Jonathan wrote:
> fixing compiler warnings in df v3.6 for c-state toggle and pmc count.
> 
> Change-Id: I74f8f1eafccf523a89d60d005e3549235f75c6b8
> Signed-off-by: Jonathan Kim 
> ---
>   drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 4 ++--
>   1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c 
> b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> index f403c62c944e..e1cf7e9c616a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> @@ -93,7 +93,7 @@ const struct attribute_group *df_v3_6_attr_groups[] = {
>   NULL
>   };
>   
> -static df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow)
> +static int df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow)
>   {
>   int r = 0;
>   
> @@ -546,7 +546,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device 
> *adev,
> uint64_t config,
> uint64_t *count)
>   {
> - uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
> + uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
>   *count = 0;
>   
>   switch (adev->asic_type) {
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amdgpu: disable c-states on xgmi perfmons

2019-10-17 Thread Kim, Jonathan
Thanks for the catch, Philip. I must have missed this among the Renoir warnings.
I sent a fix.

Jon

-Original Message-
From: Yang, Philip  
Sent: Thursday, October 17, 2019 1:40 PM
To: Quan, Evan ; Kim, Jonathan ; 
amd-gfx@lists.freedesktop.org
Cc: Kuehling, Felix 
Subject: Re: [PATCH] drm/amdgpu: disable c-states on xgmi perfmons

I got compiler warnings after updating this morning, because the variables are
not initialized on the df_v3_6_set_df_cstate() failure-return path.

  CC [M]  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.o
   CC [M]  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.o
/home/yangp/git/compute_staging/kernel/drivers/gpu/drm/amd/amdgpu/df_v3_6.c:96:8:
 
warning: return type defaults to ‘int’ [-Wreturn-type]
  static df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow)
 ^
/home/yangp/git/compute_staging/kernel/drivers/gpu/drm/amd/amdgpu/df_v3_6.c: 
In function ‘df_v3_6_pmc_get_count’:
/home/yangp/git/compute_staging/kernel/drivers/gpu/drm/amd/amdgpu/df_v3_6.c:564:22:
 
warning: ‘hi_val’ may be used uninitialized in this function 
[-Wmaybe-uninitialized]
*count  = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
   ^~~
/home/yangp/git/compute_staging/kernel/drivers/gpu/drm/amd/amdgpu/df_v3_6.c:564:47:
 
warning: ‘lo_val’ may be used uninitialized in this function 
[-Wmaybe-uninitialized]
*count  = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
^~~
   CC [M]  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.o


Regards,
Philip

On 2019-10-16 10:54 p.m., Quan, Evan wrote:
> Reviewed-by: Evan Quan 
> 
>> -Original Message-
>> From: Kim, Jonathan 
>> Sent: 2019年10月17日 10:06
>> To: amd-gfx@lists.freedesktop.org
>> Cc: Kuehling, Felix ; Quan, Evan 
>> ; Kim, Jonathan ; Kim, 
>> Jonathan 
>> Subject: [PATCH] drm/amdgpu: disable c-states on xgmi perfmons
>>
>> Reads or writes to DF registers while the GPU DF is in a c-state will
>> result in a hang. DF c-states should therefore be disabled prior to the
>> access and re-enabled afterwards.
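>>
>> A minimal sketch of the bracket this adds around every DF register access
>> (names from the diff below):
>>
>>     if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
>>             return; /* DF may be in a c-state; not safe to touch registers */
>>     /* ... read/write DF indirect registers under pcie_idx_lock ... */
>>     df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);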
>>
>> v2: use old powerplay routines for vega20
>>
>> Change-Id: I6d5a83e4fe13e29c73dfb03a94fe7c611e867fec
>> Signed-off-by: Jonathan Kim 
>> ---
>>   drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 36
>> +++-
>>   1 file changed, 35 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
>> b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
>> index 16fbd2bc8ad1..f403c62c944e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
>> @@ -93,6 +93,21 @@ const struct attribute_group 
>> *df_v3_6_attr_groups[] = {
>>  NULL
>>   };
>>
>> +static df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow) 
>> +{
>> +int r = 0;
>> +
>> +if (is_support_sw_smu(adev)) {
>> +r = smu_set_df_cstate(&adev->smu, allow);
>> +} else if (adev->powerplay.pp_funcs
>> +&& adev->powerplay.pp_funcs->set_df_cstate) {
>> +r = adev->powerplay.pp_funcs->set_df_cstate(
>> +adev->powerplay.pp_handle, allow);
>> +}
>> +
>> +return r;
>> +}
>> +
>>   static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
>>   uint32_t ficaa_val)
>>   {
>> @@ -102,6 +117,9 @@ static uint64_t df_v3_6_get_fica(struct 
>> amdgpu_device *adev,
>>  address = adev->nbio.funcs->get_pcie_index_offset(adev);
>>  data = adev->nbio.funcs->get_pcie_data_offset(adev);
>>
>> +if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
>> +return 0xFFFFFFFF;
>> +
>>  spin_lock_irqsave(&adev->pcie_idx_lock, flags);
>>  WREG32(address,
>> smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
>>  WREG32(data, ficaa_val);
>> @@ -114,6 +132,8 @@ static uint64_t df_v3_6_get_fica(struct 
>> amdgpu_device *adev,
>>
>>  spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
>>
>> +df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
>> +
>>  return (((ficadh_val & 0x) << 32) | ficadl_val);
>>   }
>>
>> @@ -125,6 +145,9 @@ static void df_v3_6_set_fica(struct amdgpu_device 
>> *adev, uint32_t ficaa_val,
>>  address = adev->nbio.funcs->get_pcie_index_offset(adev);
>>  data = adev->nbio.funcs->get_pcie_data_offset(adev);
>>
>> +if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
>> +return;
>> +
>>  spin_lock_irqsave(&adev->pcie_idx_lock, flags);
>>  WREG32(address,
>> smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
>>  WREG32(data, ficaa_val);
>> @@ -134,8 +157,9 @@ static void df_v3_6_set_fica(struct amdgpu_device 
>> *adev, uint32_t ficaa_val,
>>
>>  WREG32(address,
>> smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
>>  WREG32(data, ficadh_val);
>> -
>>  spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
>> +
>> +df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
>>   }
>>
>>   /*
>> @@ -153,12 +177,17 @@ static void df_v3_6_perfmon_rreg(struct 
>> amdgpu_device 

[PATCH] drm/amdgpu: fix compiler warnings for df perfmons

2019-10-17 Thread Kim, Jonathan
fixing compiler warnings in df v3.6 for c-state toggle and pmc count.

Change-Id: I74f8f1eafccf523a89d60d005e3549235f75c6b8
Signed-off-by: Jonathan Kim 
---
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c 
b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index f403c62c944e..e1cf7e9c616a 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -93,7 +93,7 @@ const struct attribute_group *df_v3_6_attr_groups[] = {
NULL
 };
 
-static df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow)
+static int df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow)
 {
int r = 0;
 
@@ -546,7 +546,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device 
*adev,
  uint64_t config,
  uint64_t *count)
 {
-   uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+   uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
*count = 0;
 
switch (adev->asic_type) {
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amdgpu: disable c-states on xgmi perfmons

2019-10-17 Thread Yang, Philip
I got compiler warnings after updating this morning, because the variables
are not initialized on the df_v3_6_set_df_cstate() failure-return path.

  CC [M]  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.o
   CC [M]  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.o
/home/yangp/git/compute_staging/kernel/drivers/gpu/drm/amd/amdgpu/df_v3_6.c:96:8:
 
warning: return type defaults to ‘int’ [-Wreturn-type]
  static df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow)
 ^
/home/yangp/git/compute_staging/kernel/drivers/gpu/drm/amd/amdgpu/df_v3_6.c: 
In function ‘df_v3_6_pmc_get_count’:
/home/yangp/git/compute_staging/kernel/drivers/gpu/drm/amd/amdgpu/df_v3_6.c:564:22:
 
warning: ‘hi_val’ may be used uninitialized in this function 
[-Wmaybe-uninitialized]
*count  = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
   ^~~
/home/yangp/git/compute_staging/kernel/drivers/gpu/drm/amd/amdgpu/df_v3_6.c:564:47:
 
warning: ‘lo_val’ may be used uninitialized in this function 
[-Wmaybe-uninitialized]
*count  = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
^~~
   CC [M]  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.o


Regards,
Philip

On 2019-10-16 10:54 p.m., Quan, Evan wrote:
> Reviewed-by: Evan Quan 
> 
>> -Original Message-
>> From: Kim, Jonathan 
>> Sent: 2019年10月17日 10:06
>> To: amd-gfx@lists.freedesktop.org
>> Cc: Kuehling, Felix ; Quan, Evan
>> ; Kim, Jonathan ; Kim,
>> Jonathan 
>> Subject: [PATCH] drm/amdgpu: disable c-states on xgmi perfmons
>>
>> Reads or writes to DF registers while the GPU DF is in a c-state will
>> result in a hang. DF c-states should therefore be disabled prior to the
>> access and re-enabled afterwards.
>>
>> v2: use old powerplay routines for vega20
>>
>> Change-Id: I6d5a83e4fe13e29c73dfb03a94fe7c611e867fec
>> Signed-off-by: Jonathan Kim 
>> ---
>>   drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 36
>> +++-
>>   1 file changed, 35 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
>> b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
>> index 16fbd2bc8ad1..f403c62c944e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
>> @@ -93,6 +93,21 @@ const struct attribute_group *df_v3_6_attr_groups[] =
>> {
>>  NULL
>>   };
>>
>> +static df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow)
>> +{
>> +int r = 0;
>> +
>> +if (is_support_sw_smu(adev)) {
>> +r = smu_set_df_cstate(&adev->smu, allow);
>> +} else if (adev->powerplay.pp_funcs
>> +&& adev->powerplay.pp_funcs->set_df_cstate) {
>> +r = adev->powerplay.pp_funcs->set_df_cstate(
>> +adev->powerplay.pp_handle, allow);
>> +}
>> +
>> +return r;
>> +}
>> +
>>   static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
>>   uint32_t ficaa_val)
>>   {
>> @@ -102,6 +117,9 @@ static uint64_t df_v3_6_get_fica(struct
>> amdgpu_device *adev,
>>  address = adev->nbio.funcs->get_pcie_index_offset(adev);
>>  data = adev->nbio.funcs->get_pcie_data_offset(adev);
>>
>> +if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
>> +return 0xFFFFFFFF;
>> +
>>  spin_lock_irqsave(&adev->pcie_idx_lock, flags);
>>  WREG32(address,
>> smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
>>  WREG32(data, ficaa_val);
>> @@ -114,6 +132,8 @@ static uint64_t df_v3_6_get_fica(struct
>> amdgpu_device *adev,
>>
>>  spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
>>
>> +df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
>> +
>>  return (((ficadh_val & 0x) << 32) | ficadl_val);
>>   }
>>
>> @@ -125,6 +145,9 @@ static void df_v3_6_set_fica(struct amdgpu_device
>> *adev, uint32_t ficaa_val,
>>  address = adev->nbio.funcs->get_pcie_index_offset(adev);
>>  data = adev->nbio.funcs->get_pcie_data_offset(adev);
>>
>> +if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
>> +return;
>> +
>>  spin_lock_irqsave(&adev->pcie_idx_lock, flags);
>>  WREG32(address,
>> smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
>>  WREG32(data, ficaa_val);
>> @@ -134,8 +157,9 @@ static void df_v3_6_set_fica(struct amdgpu_device
>> *adev, uint32_t ficaa_val,
>>
>>  WREG32(address,
>> smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
>>  WREG32(data, ficadh_val);
>> -
>>  spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
>> +
>> +df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
>>   }
>>
>>   /*
>> @@ -153,12 +177,17 @@ static void df_v3_6_perfmon_rreg(struct
>> amdgpu_device *adev,
>>  address = adev->nbio.funcs->get_pcie_index_offset(adev);
>>  data = adev->nbio.funcs->get_pcie_data_offset(adev);
>>
>> +if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
>> +return;
>> +
>>  spin_lock_irqsave(&adev->pcie_idx_lock, flags);
>>  WREG32(address, lo_addr);
>>  *lo_val = RREG32(data);
>>  

Re: [PATCH] drm/amdgpu/display: fix compile error

2019-10-17 Thread Li, Sun peng (Leo)
This has already been fixed here:
https://patchwork.freedesktop.org/patch/336195/

Should be mirrored on Alex's tree soon.

Thanks,
Leo

On 2019-10-17 2:19 a.m., Chen Wandun wrote:
> From: Chenwandun 
> 
> drivers/gpu/drm/amd/amdgpu/../display/dc/dcn20/dcn20_resource.c:1913:48: 
> error: struct dc_crtc_timing_flags has no member named DSC
>if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
>   ^
> drivers/gpu/drm/amd/amdgpu/../display/dc/dcn20/dcn20_resource.c:1914:73: 
> error: struct dc_crtc_timing has no member named dsc_cfg
>pipes[pipe_cnt].dout.output_bpp = 
> res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
>   ^
> Signed-off-by: Chenwandun 
> ---
>  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 ++
>  1 file changed, 2 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
> b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> index 914e378..4f03318 100644
> --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> @@ -1910,8 +1910,10 @@ int dcn20_populate_dml_pipes_from_context(
>   pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
>   }
>  
> +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
>   if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
>   pipes[pipe_cnt].dout.output_bpp = 
> res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
> +#endif
>  
>   /* todo: default max for now, until there is logic reflecting 
> this in dc*/
>   pipes[pipe_cnt].dout.output_bpc = 12;
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [RFC PATCH RESEND] drm/amd/display: Add back missing backlight level rounding

2019-10-17 Thread Li, Sun peng (Leo)
Thanks for the detailed notes! See reply inline.

On 2019-10-15 4:03 p.m., Lukáš Krejčí wrote:
> [Why]
> Having the rounding of the backlight value restored to the way it was
> seemingly gets rid of backlight flickering on certain Stoney Ridge
> laptops.
> 
> [How]
> Rescale the backlight level between min and max input signal value and
> round it to a number between 0x0 and 0xFF. Then, use the rounding mode
> that was previously in driver_set_backlight_level() and
> dmcu_set_backlight_level(), i.e. rescale the backlight level between 0x0
> and 0x1000 by multiplying it by 0x101 and use the most significant bit
> of the fraction (or in this case the 8th bit of the value because it's
> the same thing, e.g. C3 * 0x101 = 0xC3C3 and C3 * 0x10101 = 0xC3C3C3) to
> round it.
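> 
> A worked trace of those two steps for an input of 0xC3, as a sketch
> assuming min_input_signal = 0 and max_input_signal = 255 (so the first
> rescale is just a multiply by 0x100), mirroring the diff below:
> 
>     u32 b = 0xC3 * 0x100;          /* rescale: 0xC300 */
>     b = (b >> 8) + ((b >> 7) & 1); /* round back to 8 bits: 0xC3 */
>     b *= 0x101;                    /* expand: 0xC3C3 */
>     b += (b >> 7) & 1;             /* round on bit 7: 0xC3C4 */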
> 
> Fixes: 262485a50fd4 ("drm/amd/display: Expand dc to use 16.16 bit backlight")
> Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204957
> Signed-off-by: Lukáš Krejčí 
> ---
> 
> Notes:
> Bug:
> - Can be reproduced on HP 15-rb020nc (Stoney Ridge R2 E2-9000e APU) and
>   Acer Aspire 3 A315-21G-91JL (Stoney Ridge R3 A4-9120 APU).
> 
> - For me, the bug is inconsistent - it does not happen on every boot,
>   but if it does happen, it's usually within three minutes after bootup.
> 
> - It concerns the backlight. That means it can be reproduced on the
>   framebuffer console.
> 
> - Reproduces on Arch Linux (custom built) live CD, linux kernel v5.3.2
>   and Ubuntu 19.04, kernel-ppa/mainline v5.0.0-rc1, v5.0.0, v5.3.2, 
> v5.3.4,
>   v5.4-rc2.
> 
> - Can not be reproduced on kernel v5.3.4 with this patch applied or on
>   v4.20.0, 4.20.17, 4.19.75 (this bug is a regression).
> 
> - The other person that reproduced the issue (see the Bugzilla link
>   above) confirmed that the patch works for them too.
> 
> Patch:
> - Is the comment modified by this commit correct? Both
>   driver_set_backlight_level() and dmcu_set_backlight_level() check the
>   17th bit of `brightness` aka `backlight_pwm_u16_16`, but
>   262485a50fd4532a8d71165190adc7a0a19bcc9e ("drm/amd/display: Expand dc
>   to use 16.16 bit backlight") specifically mentions 0xFFFF as the max
>   backlight value.
> - use_smooth_brightness is false (no DMCU firmware available) on my
>   laptop, so the other code path (dmcu_set_backlight_level()) is
>   untested.
> 
> - I'm not sure why the rounding fixes the issue and whether this
>   function is the right place to add back the rounding (and whether it
>   even is the right way to solve the issue), so that's why this patch is
>   RFC.

We've seen some similar issues when fractional duty cycles are enabled for 
backlight pwm.
I attached a hack to the bugzilla ticket that disables it, please give that 
patch a shot
first. I'd rather not change this calculation for all panels if the flickering 
issue is
only a quirk for some.

Thanks,
Leo

> 
>  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c   | 17 +++--
>  1 file changed, 11 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index a52f0b13a2c8..af9a5f46b671 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -2083,17 +2083,22 @@ static int amdgpu_dm_backlight_update_status(struct 
> backlight_device *bd)
>* The brightness input is in the range 0-255
>* It needs to be rescaled to be between the
>* requested min and max input signal
> -  *
> -  * It also needs to be scaled up by 0x101 to
> -  * match the DC interface which has a range of
> -  * 0 to 0xffff
>*/
>   brightness =
>   brightness
> - * 0x101
> + * 0x100
>   * (caps.max_input_signal - caps.min_input_signal)
>   / AMDGPU_MAX_BL_LEVEL
> - + caps.min_input_signal * 0x101;
> + + caps.min_input_signal * 0x100;
> +
> + brightness = (brightness >> 8) + ((brightness >> 7) & 1);
> + /*
> +  * It also needs to be scaled up by 0x101 and
> +  * rounded off to match the DC interface which
> +  * has a range of 0 to 0x1
> +  */
> + brightness *= 0x101;
> + brightness += (brightness >> 7) & 1;
>  
>   if (dc_link_set_backlight_level(dm->backlight_link,
>   brightness, 0))
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 1/2] drm/amdgpu/vce: fix allocation size in enc ring test

2019-10-17 Thread Christian König

Am 17.10.19 um 17:44 schrieb Alex Deucher:

We need to allocate a large enough buffer for the
feedback buffer, otherwise the IB test can overwrite
other memory.
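
The fix allocates a dedicated reserved BO for it; a minimal sketch of that
BO lifecycle (mirroring the diff below):

    struct amdgpu_bo *bo = NULL;
    int r;

    /* 512 bytes of VRAM, returned with its reservation lock held */
    r = amdgpu_bo_create_reserved(adev, 512, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL, NULL);
    if (r)
            return r;

    /* ... point the feedback buffer at amdgpu_bo_gpu_offset(bo) ... */

    amdgpu_bo_unreserve(bo); /* drop the reservation lock */
    amdgpu_bo_unref(&bo);    /* drop the reference, freeing the BO */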

Signed-off-by: Alex Deucher 


Acked-by: Christian König  for the series.


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 20 +++-
  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h |  1 +
  2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b70b3c45bb29..65044b1b3d4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -429,13 +429,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, 
struct drm_file *filp)
   * Open up a stream for HW test
   */
  int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
  struct dma_fence **fence)
  {
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
-   uint64_t dummy;
+   uint64_t addr;
int i, r;
  
	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);

@@ -444,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, 
uint32_t handle,
  
	ib = &job->ibs[0];
  
-	dummy = ib->gpu_addr + 1024;

+   addr = amdgpu_bo_gpu_offset(bo);
  
  	/* stitch together an VCE create msg */

ib->length_dw = 0;
@@ -476,8 +477,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, 
uint32_t handle,
  
	ib->ptr[ib->length_dw++] = 0x00000014; /* len */

	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
-   ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-   ib->ptr[ib->length_dw++] = dummy;
+   ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+   ib->ptr[ib->length_dw++] = addr;
+   ib->ptr[ib->length_dw++] = 0x00000001;
  
  	for (i = ib->length_dw; i < ib_size_dw; ++i)

@@ -1110,13 +1111,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
  int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
  {
struct dma_fence *fence = NULL;
+   struct amdgpu_bo *bo = NULL;
long r;
  
  	/* skip vce ring1/2 ib test for now, since it's not reliable */

	if (ring != &ring->adev->vce.ring[0])
return 0;
  
-	r = amdgpu_vce_get_create_msg(ring, 1, NULL);

+   r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+   if (r)
+   return r;
+
+   r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
  
@@ -1132,5 +1140,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
  
  error:

dma_fence_put(fence);
+   amdgpu_bo_unreserve(bo);
+   amdgpu_bo_unref(&bo);
return r;
  }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 30ea54dd9117..e802f7d9db0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -59,6 +59,7 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
  int amdgpu_vce_suspend(struct amdgpu_device *adev);
  int amdgpu_vce_resume(struct amdgpu_device *adev);
  int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
  struct dma_fence **fence);
  int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
   bool direct, struct dma_fence **fence);


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: Spontaneous reboots when using RX 560

2019-10-17 Thread Koenig, Christian
Am 17.10.19 um 16:29 schrieb Sylvain Munaut:
> Hi,
>
>
>>> I have RX 560 2G card. It's plugged into a 16x physical / 4x
>>> electrical slot of a X570 chipset motherboard with a Ryzen 3700X CPU.
>>> The hardware works fine and is stable under Windows (tested with
>>> games, benchmarks, stress-tests, ...)
>> Does booting with pci=noats on the kernel command line in grub fix the issue?
> It doesn't :/
>
> Message is slightly different but same idea :
>
> [   83.704035] amdgpu :06:00.0: AMD-Vi: Event logged
> [IO_PAGE_FAULT domain=0x address=0x0 flags=0x0020]
> [   88.732685] [drm:amdgpu_dm_commit_planes.constprop.0 [amdgpu]]
> *ERROR* Waiting for fences timed out or interrupted!
> [   92.074379] ixgbe :04:00.1: Adapter removed
> [   93.480989] igb :07:00.0 enp7s0: PCIe link lost
>
> So it screws up the PCIe very badly :/
> Specifically seems to be everything connected to the X570 chipset.

 From the hardware point of view the only thing which comes to mind is 
that you somehow triggered the ESD protection.

I assume you can rule out an unstable physical connection (because it 
works on windows), so the only thing left is that there is something 
very very badly going wrong with power management.

Have you "tuned" the power tables on the board somehow? Or maybe 
multiple GPUs connected to the same power supply?

Regards,
Christian.

>
> Cheers,
>
>  Sylvain
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 1/2] drm/amdgpu/vce: fix allocation size in enc ring test

2019-10-17 Thread James Zhu
Reviewed-by: James Zhu  for this series

On 2019-10-17 11:44 a.m., Alex Deucher wrote:
> We need to allocate a large enough buffer for the
> feedback buffer, otherwise the IB test can overwrite
> other memory.
>
> Signed-off-by: Alex Deucher 
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 20 +++-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h |  1 +
>   2 files changed, 16 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> index b70b3c45bb29..65044b1b3d4c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> @@ -429,13 +429,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device 
> *adev, struct drm_file *filp)
>* Open up a stream for HW test
>*/
>   int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> +   struct amdgpu_bo *bo,
> struct dma_fence **fence)
>   {
>   const unsigned ib_size_dw = 1024;
>   struct amdgpu_job *job;
>   struct amdgpu_ib *ib;
>   struct dma_fence *f = NULL;
> - uint64_t dummy;
> + uint64_t addr;
>   int i, r;
>   
>   r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
> @@ -444,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, 
> uint32_t handle,
>   
>   ib = &job->ibs[0];
>   
> - dummy = ib->gpu_addr + 1024;
> + addr = amdgpu_bo_gpu_offset(bo);
>   
>   /* stitch together an VCE create msg */
>   ib->length_dw = 0;
> @@ -476,8 +477,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, 
> uint32_t handle,
>   
>   ib->ptr[ib->length_dw++] = 0x00000014; /* len */
>   ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
> - ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
> - ib->ptr[ib->length_dw++] = dummy;
> + ib->ptr[ib->length_dw++] = upper_32_bits(addr);
> + ib->ptr[ib->length_dw++] = addr;
>   ib->ptr[ib->length_dw++] = 0x0001;
>   
>   for (i = ib->length_dw; i < ib_size_dw; ++i)
> @@ -1110,13 +1111,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring 
> *ring)
>   int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>   {
>   struct dma_fence *fence = NULL;
> + struct amdgpu_bo *bo = NULL;
>   long r;
>   
>   /* skip vce ring1/2 ib test for now, since it's not reliable */
>   if (ring != &ring->adev->vce.ring[0])
>   return 0;
>   
> - r = amdgpu_vce_get_create_msg(ring, 1, NULL);
> + r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
> +   AMDGPU_GEM_DOMAIN_VRAM,
> +   &bo, NULL, NULL);
> + if (r)
> + return r;
> +
> + r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
>   if (r)
>   goto error;
>   
> @@ -1132,5 +1140,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, 
> long timeout)
>   
>   error:
>   dma_fence_put(fence);
> + amdgpu_bo_unreserve(bo);
> + amdgpu_bo_unref(&bo);
>   return r;
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
> index 30ea54dd9117..e802f7d9db0a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
> @@ -59,6 +59,7 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
>   int amdgpu_vce_suspend(struct amdgpu_device *adev);
>   int amdgpu_vce_resume(struct amdgpu_device *adev);
>   int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> +   struct amdgpu_bo *bo,
> struct dma_fence **fence);
>   int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
>  bool direct, struct dma_fence **fence);
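
The pattern being fixed here is worth spelling out: anything the VCE
firmware writes back (the feedback buffer) needs its own properly sized,
pinned buffer object rather than an address inside the IB. Distilled to a
sketch, using the same calls as the patch above, with error handling
trimmed:

	struct amdgpu_bo *bo = NULL;
	uint64_t addr;
	int r;

	/* One 512-byte buffer in VRAM, page aligned, created reserved so
	 * its GPU offset can be taken safely. */
	r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	/* The firmware writes feedback here; it no longer aliases IB memory. */
	addr = amdgpu_bo_gpu_offset(bo);

	/* ... build and submit the create msg using upper_32_bits(addr)/addr ... */

	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);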

[PATCH v4] drm/amd/display: Add MST atomic routines

2019-10-17 Thread mikita.lipski
From: Mikita Lipski 

- Adding encoder atomic check to find vcpi slots for a connector
- Using DRM helper functions to calculate PBN
- Adding connector atomic check to release vcpi slots if connector
loses CRTC
- Calculate PBN and VCPI slots only once during atomic
check and store them on crtc_state to eliminate
redundant calculation
- Call drm_dp_mst_atomic_check to verify validity of MST topology
during state atomic check

v2:
- squashed previous 3 separate patches
- removed DSC PBN calculation,
- added PBN and VCPI slots properties to amdgpu connector

v3:
- moved vcpi_slots and pbn properties to dm_crtc_state and dc_stream_state
- updates stream's vcpi_slots and pbn on commit
- separated patch from the DSC MST series

v4:
- set vcpi_slots and pbn properties to dm_connector_state
- copy properties from connector state onto crtc state

Cc: Jerry Zuo 
Cc: Harry Wentland 
Cc: Nicholas Kazlauskas 
Cc: Lyude Paul 
Signed-off-by: Mikita Lipski 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 72 +--
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  6 ++
 .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 42 +--
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 32 +
 drivers/gpu/drm/amd/display/dc/dc_stream.h|  3 +
 5 files changed, 112 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 10cce584719f..1f1146a4e85e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3811,6 +3811,11 @@ create_stream_for_sink(struct amdgpu_dm_connector 
*aconnector,
 
update_stream_signal(stream, sink);
 
+   if(dm_state){
+   stream->vcpi_slots = dm_state->vcpi_slots;
+   stream->pbn = dm_state->pbn;
+   }
+
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, >vsp_infopacket, 
false, false);
 
@@ -3889,6 +3894,8 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->crc_src = cur->crc_src;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+   state->vcpi_slots = cur->vcpi_slots;
+   state->pbn = cur->pbn;
 
/* TODO Duplicate dc_stream after objects are stream object is 
flattened */
 
@@ -4157,7 +4164,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector 
*connector)
state->underscan_hborder = 0;
state->underscan_vborder = 0;
state->base.max_requested_bpc = 8;
-
+   state->vcpi_slots = 0;
+   state->pbn = 0;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
state->abm_level = amdgpu_dm_abm_level;
 
@@ -4186,7 +4194,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct 
drm_connector *connector)
new_state->underscan_enable = state->underscan_enable;
new_state->underscan_hborder = state->underscan_hborder;
new_state->underscan_vborder = state->underscan_vborder;
-
+   new_state->vcpi_slots = state->vcpi_slots;
+   new_state->pbn = state->pbn;
return &new_state->base;
 }
 
@@ -4587,6 +4596,37 @@ static int dm_encoder_helper_atomic_check(struct 
drm_encoder *encoder,
  struct drm_crtc_state *crtc_state,
  struct drm_connector_state 
*conn_state)
 {
+   struct drm_atomic_state *state = crtc_state->state;
+   struct drm_connector *connector = conn_state->connector;
+   struct amdgpu_dm_connector *aconnector = 
to_amdgpu_dm_connector(connector);
+   struct dm_connector_state *dm_new_connector_state = 
to_dm_connector_state(conn_state);
+   const struct drm_display_mode *adjusted_mode = 
&crtc_state->adjusted_mode;
+   struct drm_dp_mst_topology_mgr *mst_mgr;
+   struct drm_dp_mst_port *mst_port;
+   int clock, bpp = 0;
+
+   if (!aconnector->port || !aconnector->dc_sink)
+   return 0;
+
+   mst_port = aconnector->port;
+   mst_mgr = &aconnector->mst_port->mst_mgr;
+
+   if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
+   return 0;
+
+   if(!state->duplicated) {
+   bpp = (uint8_t)connector->display_info.bpc * 3;
+   clock = adjusted_mode->clock;
+   dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp);
+   }
+   dm_new_connector_state->vcpi_slots = 
drm_dp_atomic_find_vcpi_slots(state,
+  mst_mgr,
+  mst_port,
+  
dm_new_connector_state->pbn);
+   if (dm_new_connector_state->vcpi_slots < 0) {
+   DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", 
dm_new_connector_state->vcpi_slots);
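
The arithmetic this encoder check performs is the standard DRM MST
bandwidth calculation: convert the mode's pixel clock and bpp into PBN
units, then ask the topology manager to find VCPI slots against the atomic
state. Condensed, using the same helpers the patch calls:

	/* bpp here is bits per pixel across all three components. */
	int bpp = connector->display_info.bpc * 3;
	int clock = adjusted_mode->clock;	/* in kHz */

	int pbn = drm_dp_calc_pbn_mode(clock, bpp);
	int slots = drm_dp_atomic_find_vcpi_slots(state, mst_mgr,
						  mst_port, pbn);
	if (slots < 0)
		return slots;	/* the MST topology cannot fit this stream */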

Re: [PATCH hmm 00/15] Consolidate the mmu notifier interval_tree and locking

2019-10-17 Thread Koenig, Christian
Sending once more as text.

Am 17.10.19 um 18:26 schrieb Yang, Philip:
> On 2019-10-17 4:54 a.m., Christian König wrote:
>> Am 16.10.19 um 18:04 schrieb Jason Gunthorpe:
>>> On Wed, Oct 16, 2019 at 10:58:02AM +0200, Christian König wrote:
 Am 15.10.19 um 20:12 schrieb Jason Gunthorpe:
> From: Jason Gunthorpe 
>
> 8 of the mmu_notifier using drivers (i915_gem, radeon_mn, umem_odp,
> hfi1,
> scif_dma, vhost, gntdev, hmm) drivers are using a common pattern where
> they only use invalidate_range_start/end and immediately check the
> invalidating range against some driver data structure to tell if the
> driver is interested. Half of them use an interval_tree, the others are
> simple linear search lists.
>
> Of the ones I checked they largely seem to have various kinds of races,
> bugs and poor implementation. This is a result of the complexity in how
> the notifier interacts with get_user_pages(). It is extremely
> difficult to
> use it correctly.
>
> Consolidate all of this code together into the core mmu_notifier and
> provide a locking scheme similar to hmm_mirror that allows the user to
> safely use get_user_pages() and reliably know if the page list still
> matches the mm.
 That sounds really good, but could you outline for a moment how that is
achieved?
>>> It uses the same basic scheme as hmm and rdma odp, outlined in the
>>> revisions to hmm.rst later on.
>>>
>>> Basically,
>>>
>>>    seq = mmu_range_read_begin();
>>>
>>>    // This is a speculative region
>>>    .. get_user_pages()/hmm_range_fault() ..
>> How do we enforce that this get_user_pages()/hmm_range_fault() doesn't
>> see outdated page table information?
>>
>> In other words, how is the following race prevented:
>>
>> CPU A                                  CPU B
>> invalidate_range_start()
>>                                        mmu_range_read_begin()
>>                                        get_user_pages()/hmm_range_fault()
>> Updating the ptes
>> invalidate_range_end()
>>
>>
>> I mean get_user_pages() tries to circumvent this issue by grabbing a
>> reference to the pages in question, but that isn't sufficient for the
>> SVM use case.
>>
>> That's the reason why we had this horrible solution with a r/w lock and
>> a linked list of BOs in an interval tree.
>>
>> Regards,
>> Christian.
> get_user_pages/hmm_range_fault() and invalidate_range_start() both are
> called while holding mm->mmap_sem, so they are always serialized.

Not even remotely.

For calling get_user_pages()/hmm_range_fault() you only need to hold the 
mmap_sem in read mode.

And IIRC invalidate_range_start() is sometimes called without holding 
the mmap_sem at all.

So again how are they serialized?

Regards,
Christian.

>
> Philip
>>>    // Result cannot be dereferenced
>>>
>>>    take_lock(driver->update);
>>>    if (mmu_range_read_retry(&mrn, range.notifier_seq) {
>>>   // collision! The results are not correct
>>>   goto again
>>>    }
>>>
>>>    // no collision, and now under lock. Now we can de-reference the
>>> pages/etc
>>>    // program HW
>>>    // Now the invalidate callback is responsible to synchronize against
>>> changes
>>>    unlock(driver->update)
>>>
>>> Basically, anything that was using hmm_mirror correctly transitions
>>> over fairly trivially, just with the modification to store a sequence
>>> number to close that race described in the hmm commit.
>>>
>>> For something like AMD gpu I expect it to transition to use dma_fence
>>> from the notifier for coherency right before it unlocks driver->update.
>>>
>>> Jason
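
The scheme Jason sketches above, written out as driver-side code, makes the
intended answer to this race concrete: the page-table snapshot is only
committed after re-checking the sequence count under the driver's own lock,
and the invalidate callback bumps that count under the same lock. A rough
sketch (the mmu_range_* names follow the API proposed in this series, not a
final upstream interface; driver->update and program_hw() are illustrative
placeholders):

again:
	seq = mmu_range_read_begin(&mrn);

	/* Speculative: may race with a concurrent invalidation. */
	r = hmm_range_fault(&range, 0);
	if (r)
		return r;

	mutex_lock(&driver->update);
	if (mmu_range_read_retry(&mrn, seq)) {
		/* An invalidate ran meanwhile; the page list is stale. */
		mutex_unlock(&driver->update);
		goto again;
	}
	/* Sequence unchanged and lock held: safe to program the HW. */
	program_hw(&range);
	mutex_unlock(&driver->update);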

Re: [PATCH hmm 00/15] Consolidate the mmu notifier interval_tree and locking

2019-10-17 Thread Yang, Philip


On 2019-10-17 4:54 a.m., Christian König wrote:
> Am 16.10.19 um 18:04 schrieb Jason Gunthorpe:
>> On Wed, Oct 16, 2019 at 10:58:02AM +0200, Christian König wrote:
>>> Am 15.10.19 um 20:12 schrieb Jason Gunthorpe:
 From: Jason Gunthorpe 

 8 of the mmu_notifier using drivers (i915_gem, radeon_mn, umem_odp, 
 hfi1,
 scif_dma, vhost, gntdev, hmm) drivers are using a common pattern where
 they only use invalidate_range_start/end and immediately check the
 invalidating range against some driver data structure to tell if the
 driver is interested. Half of them use an interval_tree, the others are
 simple linear search lists.

 Of the ones I checked they largely seem to have various kinds of races,
 bugs and poor implementation. This is a result of the complexity in how
 the notifier interacts with get_user_pages(). It is extremely 
 difficult to
 use it correctly.

 Consolidate all of this code together into the core mmu_notifier and
 provide a locking scheme similar to hmm_mirror that allows the user to
 safely use get_user_pages() and reliably know if the page list still
 matches the mm.
>>> That sounds really good, but could you outline for a moment how that is
>>> achieved?
>> It uses the same basic scheme as hmm and rdma odp, outlined in the
>> revisions to hmm.rst later on.
>>
>> Basically,
>>
>>   seq = mmu_range_read_begin();
>>
>>   // This is a speculative region
>>   .. get_user_pages()/hmm_range_fault() ..
> 
> How do we enforce that this get_user_pages()/hmm_range_fault() doesn't 
> see outdated page table information?
> 
> In other words, how is the following race prevented:
> 
> CPU A                                  CPU B
> invalidate_range_start()
>                                        mmu_range_read_begin()
>                                        get_user_pages()/hmm_range_fault()
> Updating the ptes
> invalidate_range_end()
> 
> 
> I mean get_user_pages() tries to circumvent this issue by grabbing a 
> reference to the pages in question, but that isn't sufficient for the 
> SVM use case.
> 
> That's the reason why we had this horrible solution with a r/w lock and 
> a linked list of BOs in an interval tree.
> 
> Regards,
> Christian.
get_user_pages/hmm_range_fault() and invalidate_range_start() both are 
called while holding mm->mmap_sem, so they are always serialized.

Philip
> 
>>   // Result cannot be dereferenced
>>
>>   take_lock(driver->update);
>>   if (mmu_range_read_retry(&mrn, range.notifier_seq) {
>>  // collision! The results are not correct
>>  goto again
>>   }
>>
>>   // no collision, and now under lock. Now we can de-reference the 
>> pages/etc
>>   // program HW
>>   // Now the invalidate callback is responsible to synchronize against 
>> changes
>>   unlock(driver->update)
>>
>> Basically, anything that was using hmm_mirror correctly transitions
>> over fairly trivially, just with the modification to store a sequence
>> number to close that race described in the hmm commit.
>>
>> For something like AMD gpu I expect it to transition to use dma_fence
>> from the notifier for coherency right before it unlocks driver->update.
>>
>> Jason

[PATCH] drm/amdgpu/powerplay: use local renoir array sizes for clock fetching

2019-10-17 Thread Alex Deucher
To avoid walking past the end of the arrays since the PP_SMU
defines don't match the renoir defines.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index fa314c275a82..f0c8d1ad2a80 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -427,22 +427,22 @@ static int renoir_get_dpm_clock_table(struct smu_context 
*smu, struct dpm_clocks
if (!clock_table || !table)
return -EINVAL;
 
-   for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
+   for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
}
 
-   for (i = 0; i < PP_SMU_NUM_SOCCLK_DPM_LEVELS; i++) {
+   for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
}
 
-   for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) {
+   for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
}
 
-   for (i = 0; i<  PP_SMU_NUM_MEMCLK_DPM_LEVELS; i++) {
+   for (i = 0; i<  NUM_MEMCLK_DPM_LEVELS; i++) {
clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
}
-- 
2.23.0

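
The subtlety here is that the destination struct (sized by the
PP_SMU_NUM_*_DPM_LEVELS defines) and the source table (sized by renoir's
NUM_*_DPM_LEVELS defines) come from different headers, so nothing ties the
two counts together. Iterating with the source define is only safe while it
is the smaller of the two; a more defensive variant would clamp the bound
explicitly (illustrative sketch, not what the patch does):

	for (i = 0; i < min(NUM_DCFCLK_DPM_LEVELS,
			    PP_SMU_NUM_DCFCLK_DPM_LEVELS); i++) {
		clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
		clock_table->DcfClocks[i].Vol  = table->DcfClocks[i].Vol;
	}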

[PATCH 2/2] drm/amdgpu/vce: make some functions static

2019-10-17 Thread Alex Deucher
They are not used outside of the file they are defined in.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 15 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h |  5 -
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 65044b1b3d4c..703677f2ff6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -80,6 +80,11 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
 MODULE_FIRMWARE(FIRMWARE_VEGA20);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+struct amdgpu_bo *bo,
+struct dma_fence **fence);
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t 
handle,
+ bool direct, struct dma_fence **fence);
 
 /**
  * amdgpu_vce_init - allocate memory, load vce firmware
@@ -428,9 +433,9 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, 
struct drm_file *filp)
  *
  * Open up a stream for HW test
  */
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+struct amdgpu_bo *bo,
+struct dma_fence **fence)
 {
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
@@ -508,8 +513,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, 
uint32_t handle,
  *
  * Close up a stream for HW test or if userspace failed to do so
  */
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-  bool direct, struct dma_fence **fence)
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t 
handle,
+ bool direct, struct dma_fence **fence)
 {
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index e802f7d9db0a..d6d83a3ec803 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -58,11 +58,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vce_entity_init(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
- struct dma_fence **fence);
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-  bool direct, struct dma_fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file 
*filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 1/2] drm/amdgpu/vce: fix allocation size in enc ring test

2019-10-17 Thread Alex Deucher
We need to allocate a large enough buffer for the
feedback buffer, otherwise the IB test can overwrite
other memory.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 20 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h |  1 +
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b70b3c45bb29..65044b1b3d4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -429,13 +429,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, 
struct drm_file *filp)
  * Open up a stream for HW test
  */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
  struct dma_fence **fence)
 {
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
-   uint64_t dummy;
+   uint64_t addr;
int i, r;
 
	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -444,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, 
uint32_t handle,
 
	ib = &job->ibs[0];
 
-   dummy = ib->gpu_addr + 1024;
+   addr = amdgpu_bo_gpu_offset(bo);
 
/* stitch together an VCE create msg */
ib->length_dw = 0;
@@ -476,8 +477,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, 
uint32_t handle,
 
ib->ptr[ib->length_dw++] = 0x0014; /* len */
ib->ptr[ib->length_dw++] = 0x0505; /* feedback buffer */
-   ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-   ib->ptr[ib->length_dw++] = dummy;
+   ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+   ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0001;
 
for (i = ib->length_dw; i < ib_size_dw; ++i)
@@ -1110,13 +1111,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
struct dma_fence *fence = NULL;
+   struct amdgpu_bo *bo = NULL;
long r;
 
/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
return 0;
 
-   r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+   r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+   if (r)
+   return r;
+
+   r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
 
@@ -1132,5 +1140,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, 
long timeout)
 
 error:
dma_fence_put(fence);
+   amdgpu_bo_unreserve(bo);
+   amdgpu_bo_unref(&bo);
return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 30ea54dd9117..e802f7d9db0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -59,6 +59,7 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
  struct dma_fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
   bool direct, struct dma_fence **fence);
-- 
2.23.0


Re: [PATCH] drm/amd/display: Modify display link stream setup sequence.

2019-10-17 Thread Siqueira, Rodrigo
Hi Zhan,

I tested your patch, and it fixed the issue. I have some notes:

1. Your patch does not apply cleanly; try rebasing your branch (I
fixed it up manually to test your patch).
2. In the commit message, I recommend describing the "pink" color
issue seen when using HDMI. It will make it easier to understand the
issue that your patch is trying to fix.

Thanks

On 10/17, Liu, Zhan wrote:
> From: Zhan Liu 
> 
> [Why]
> When a specific kind of connector is detected,
> DC needs to set the attribute of the stream.
> This step needs to be done before enabling link,
> or some bugs (e.g. display won't light up)
> will be observed.
> 
> [How]
> Setting the attribute of the stream first, then
> enabling stream.
> 
> Signed-off-by: Zhan Liu 
> ---
>  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 20 +--
>  1 file changed, 10 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
> b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> index fb18681b502b..713caab82837 100644
> --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> @@ -2745,16 +2745,6 @@ void core_link_enable_stream(
> dc_is_virtual_signal(pipe_ctx->stream->signal))
> return;
> 
> -   if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
> -   stream->link->link_enc->funcs->setup(
> -   stream->link->link_enc,
> -   pipe_ctx->stream->signal);
> -   pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
> -   pipe_ctx->stream_res.stream_enc,
> -   pipe_ctx->stream_res.tg->inst,
> -   stream->timing.timing_3d_format != 
> TIMING_3D_FORMAT_NONE);
> -   }
> -
> if (dc_is_dp_signal(pipe_ctx->stream->signal))
> 
> pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
> pipe_ctx->stream_res.stream_enc,
> @@ -2841,6 +2831,16 @@ void core_link_enable_stream(
> CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
> COLOR_DEPTH_UNDEFINED);
> 
> +   if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
> +   stream->link->link_enc->funcs->setup(
> +   stream->link->link_enc,
> +   pipe_ctx->stream->signal);
> +   
> pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
> +   pipe_ctx->stream_res.stream_enc,
> +   pipe_ctx->stream_res.tg->inst,
> +   stream->timing.timing_3d_format != 
> TIMING_3D_FORMAT_NONE);
> +   }
> +
>  #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
> if (pipe_ctx->stream->timing.flags.DSC) {
> if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
> --
> 2.17.1

-- 
Rodrigo Siqueira
Software Engineer, Advanced Micro Devices (AMD)
https://siqueira.tech



Re: Spontaneous reboots when using RX 560

2019-10-17 Thread Sylvain Munaut
Hi,


> > I have RX 560 2G card. It's plugged into a 16x physical / 4x
> > electrical slot of a X570 chipset motherboard with a Ryzen 3700X CPU.
> > The hardware works fine and is stable under Windows (tested with
> > games, benchmarks, stress-tests, ...)
>
> Does booting with pci=noats on the kernel command line in grub fix the issue?

It doesn't :/

Message is slightly different but same idea :

[   83.704035] amdgpu :06:00.0: AMD-Vi: Event logged
[IO_PAGE_FAULT domain=0x address=0x0 flags=0x0020]
[   88.732685] [drm:amdgpu_dm_commit_planes.constprop.0 [amdgpu]]
*ERROR* Waiting for fences timed out or interrupted!
[   92.074379] ixgbe :04:00.1: Adapter removed
[   93.480989] igb :07:00.0 enp7s0: PCIe link lost

So it screws up the PCIe very badly :/
Specifically seems to be everything connected to the X570 chipset.

Cheers,

Sylvain

Re: [PATCH] drm/amd/powerplay: add lock protection for swSMU APIs

2019-10-17 Thread Grodzovsky, Andrey

On 10/16/19 11:55 PM, Quan, Evan wrote:
> This is a quick and low risk fix. Those APIs which
> are exposed to other IPs or to support sysfs/hwmon
> interfaces or DAL will have lock protection. Meanwhile
> no lock protection is enforced for internally used swSMU
> APIs. Future optimization is needed.


Does it mean that there is still a risk of collision on SMU access between 
an external API function and an internal one?

Andrey


>
> Change-Id: I8392652c9da1574a85acd9b171f04380f3630852
> Signed-off-by: Evan Quan 
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c   |   6 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h   |   6 -
>   drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c|  23 +-
>   .../amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c  |   4 +-
>   drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 684 --
>   .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h| 163 +++--
>   drivers/gpu/drm/amd/powerplay/navi10_ppt.c|  15 +-
>   drivers/gpu/drm/amd/powerplay/renoir_ppt.c|  12 +-
>   drivers/gpu/drm/amd/powerplay/smu_v11_0.c |   7 +-
>   drivers/gpu/drm/amd/powerplay/vega20_ppt.c|   6 +-
>   10 files changed, 773 insertions(+), 153 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
> index 263265245e19..28d32725285b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
> @@ -912,7 +912,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool 
> low)
>   if (is_support_sw_smu(adev)) {
>   ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
>low ? &clk_freq : NULL,
> -  !low ? &clk_freq : NULL);
> +  !low ? &clk_freq : NULL,
> +  true);
>   if (ret)
>   return 0;
>   return clk_freq * 100;
> @@ -930,7 +931,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool 
> low)
>   if (is_support_sw_smu(adev)) {
>   ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
>low ? &clk_freq : NULL,
> -  !low ? &clk_freq : NULL);
> +  !low ? &clk_freq : NULL,
> +  true);
>   if (ret)
>   return 0;
>   return clk_freq * 100;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
> index 1c5c0fd76dbf..2cfb677272af 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
> @@ -298,12 +298,6 @@ enum amdgpu_pcie_gen {
>   #define amdgpu_dpm_get_current_power_state(adev) \
>   
> ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
>   
> -#define amdgpu_smu_get_current_power_state(adev) \
> - ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
> -
> -#define amdgpu_smu_set_power_state(adev) \
> - ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
> -
>   #define amdgpu_dpm_get_pp_num_states(adev, data) \
>   
> ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, 
> data))
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index c50d5f1e75e5..36f36b35000d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -211,7 +211,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
>   
>   if (is_support_sw_smu(adev)) {
>   if (adev->smu.ppt_funcs->get_current_power_state)
> - pm = amdgpu_smu_get_current_power_state(adev);
> + pm = smu_get_current_power_state(&adev->smu);
>   else
>   pm = adev->pm.dpm.user_state;
>   } else if (adev->powerplay.pp_funcs->get_current_power_state) {
> @@ -957,7 +957,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
>   return ret;
>   
>   if (is_support_sw_smu(adev))
> - ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
> + ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
>   else if (adev->powerplay.pp_funcs->force_clock_level)
>   ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
>   
> @@ -1004,7 +1004,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device 
> *dev,
>   return ret;
>   
>   if (is_support_sw_smu(adev))
> - ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
> + ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
>   else if (adev->powerplay.pp_funcs->force_clock_level)
>   ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
>   
> @@ -1044,7 +1044,7 @@ static ssize_t 
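
The shape of the change is a lock_needed flag on each exported swSMU entry
point: external callers (sysfs/hwmon, DAL, other IPs) pass true and
serialize on smu->mutex, while internal call chains that already hold the
mutex pass false. Condensed from the pattern in the diff, with the body
elided:

	int smu_force_clk_levels(struct smu_context *smu,
				 enum smu_clk_type clk_type,
				 uint32_t mask, bool lock_needed)
	{
		int ret = 0;

		if (lock_needed)
			mutex_lock(&smu->mutex);

		/* ... program the requested clock levels ... */

		if (lock_needed)
			mutex_unlock(&smu->mutex);

		return ret;
	}

The residual risk Andrey points at is exactly a path that is wrongly
classified as internal: it would skip the mutex while an external caller
holds it.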

Re: Spontaneous reboots when using RX 560

2019-10-17 Thread Alex Deucher
On Thu, Oct 17, 2019 at 10:11 AM Sylvain Munaut <246...@gmail.com> wrote:
>
> HI,
>
> I have RX 560 2G card. It's plugged into a 16x physical / 4x
> electrical slot of a X570 chipset motherboard with a Ryzen 3700X CPU.
> The hardware works fine and is stable under Windows (tested with
> games, benchmarks, stress-tests, ...)

Does booting with pci=noats on the kernel command line in grub fix the issue?

Alex

>
> But when trying for instance steam under linux, or even just the 'app
> launcher' from ubuntu that has some visual effect, the machine will
> instantly reboot.
> Also, after the reboot, the GPU is no longer detected (lspci doesn't
> show it, and under Windows, it's nowhere to be seen either). It needs
> to be physically turned off and turned back on for it to work again.
>
> I added a serial console to try to get some output and when doing that
> it doesn't immediately reboot (but the rest is the same, machine is
> unusable and a reboot will have the GPU not present anymore until
> poweroff).
>
> This is the output I get :
>
> [  144.311704] amdgpu :06:00.0: AMD-Vi: Event logged
> [IO_PAGE_FAULT domain=0x address=0xa076010100 flags=0x0010]
> [  144.322734] amdgpu :06:00.0: AMD-Vi: Event logged
> [IO_PAGE_FAULT domain=0x address=0xa076230100 flags=0x0010]
> [  144.333751] amdgpu :06:00.0: AMD-Vi: Event logged
> [IO_PAGE_FAULT domain=0x address=0xa076030100 flags=0x0010]
> [  147.028625] AMD-Vi: Completion-Wait loop timed out
> [  147.206336] AMD-Vi: Completion-Wait loop timed out
> [  147.368260] AMD-Vi: Completion-Wait loop timed out
> [  147.532296] AMD-Vi: Completion-Wait loop timed out
> [  147.703269] AMD-Vi: Completion-Wait loop timed out
> [  147.845840] AMD-Vi: Completion-Wait loop timed out
> [  147.860950] iommu ivhd0: AMD-Vi: Event logged [IOTLB_INV_TIMEOUT
> device=06:00.0 address=0x81b1c1e60]
> [  148.015778] AMD-Vi: Completion-Wait loop timed out
> [  148.187270] AMD-Vi: Completion-Wait loop timed out
>
> (and then it seems to loop infinitely, always printing that).
>
> I tried Ubuntu 19.10 with 5.3.0-18-generic
> Also Ubuntu 19.04 with 5.0.0-31-generic
> Also tried with a DKMS module from 19.30 AMDGPU-PRO patched to build
> and load under 5.3.0, all give the same result.
>
> Cheers,
>
> Sylvain Munaut

Spontaneous reboots when using RX 560

2019-10-17 Thread Sylvain Munaut
HI,

I have RX 560 2G card. It's plugged into a 16x physical / 4x
electrical slot of a X570 chipset motherboard with a Ryzen 3700X CPU.
The hardware works fine and is stable under Windows (tested with
games, benchmarks, stress-tests, ...)

But when trying for instance steam under linux, or even just the 'app
launcher' from ubuntu that has some visual effect, the machine will
instantly reboot.
Also, after the reboot, the GPU is no longer detected (lspci doesn't
show it, and under Windows, it's nowhere to be seen either). It needs
to be physically turned off and turned back on for it to work again.

I added a serial console to try to get some output and when doing that
it doesn't immediately reboot (but the rest is the same, machine is
unusable and a reboot will have the GPU not present anymore until
poweroff).

This is the output I get :

[  144.311704] amdgpu :06:00.0: AMD-Vi: Event logged
[IO_PAGE_FAULT domain=0x address=0xa076010100 flags=0x0010]
[  144.322734] amdgpu :06:00.0: AMD-Vi: Event logged
[IO_PAGE_FAULT domain=0x address=0xa076230100 flags=0x0010]
[  144.333751] amdgpu :06:00.0: AMD-Vi: Event logged
[IO_PAGE_FAULT domain=0x address=0xa076030100 flags=0x0010]
[  147.028625] AMD-Vi: Completion-Wait loop timed out
[  147.206336] AMD-Vi: Completion-Wait loop timed out
[  147.368260] AMD-Vi: Completion-Wait loop timed out
[  147.532296] AMD-Vi: Completion-Wait loop timed out
[  147.703269] AMD-Vi: Completion-Wait loop timed out
[  147.845840] AMD-Vi: Completion-Wait loop timed out
[  147.860950] iommu ivhd0: AMD-Vi: Event logged [IOTLB_INV_TIMEOUT
device=06:00.0 address=0x81b1c1e60]
[  148.015778] AMD-Vi: Completion-Wait loop timed out
[  148.187270] AMD-Vi: Completion-Wait loop timed out

(and then it seems to loop infinitely, always printing that).

I tried Ubuntu 19.10 with 5.3.0-18-generic
Also Ubuntu 19.04 with 5.0.0-31-generic
Also tried with a DKMS module from 19.30 AMDGPU-PRO patched to build
and load under 5.3.0, all give the same result.

Cheers,

Sylvain Munaut

RE: [PATCH] drm/amd/display: Modify display link stream setup sequence.

2019-10-17 Thread Liu, Zhan
Inline.

> -Original Message-
> From: Kazlauskas, Nicholas 
> Sent: 2019/October/17, Thursday 9:37 AM
> To: Liu, Zhan ; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH] drm/amd/display: Modify display link stream setup
> sequence.
> 
> On 2019-10-17 12:28 a.m., Liu, Zhan wrote:
> > From: Zhan Liu 
> >
> > [Why]
> > When a specific kind of connector is detected,
> > DC needs to set the attribute of the stream.
> > This step needs to be done before enabling link,
> > or some bugs (e.g. display won't light up)
> > will be observed.
> >
> > [How]
> > Setting the attribute of the stream first, then
> > enabling stream.
> >
> > Signed-off-by: Zhan Liu 
> 
> NAK:
> 
> 1. It's difficult to understand what issue this change is attempting to
> solve and why it actually does it. Specifics would help here.

Some of the details are IP-sensitive; that's why I chose not to include
them.

> 
> 2. It affects a common code path for all ASICs which has been tested and
> known to be working correctly for those test cases.

As we discussed before, considering Navi10 and Navi14 are using the same DC 
code and the issue is only happening on Navi14, it's more likely a BIOS 
issue. However, if we want to fix it on the display side, we can only do 
some kind of workaround. The alternative is to do the stream setup twice, 
but there is no point in repeating the setup.

If we are really worried that all ASICs will be affected, we can guard the 
section as Navi14-specific code and treat this patch as a "hack".

> 
> 3. The description is incorrect - the link enable/stream enable were
> both previously happening after the stream setup. What's changed in the
> patch is the link enable now happens before the link setup.
> 
> Both of these calls internally go through the command table to VBIOS so
> what behavior differences you're seeing may be caused by the input
> parameters to the ATOM_ENCODER_CMD_STREAM_SETUP or
> TRANSMITTER_CONTROL_ENABLE commands or the actual execution of
> those
> commands.
> 
> Nicholas Kazlauskas
> 
> > ---
> >   drivers/gpu/drm/amd/display/dc/core/dc_link.c | 20 +--
> >   1 file changed, 10 insertions(+), 10 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> > index fb18681b502b..713caab82837 100644
> > --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> > +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> > @@ -2745,16 +2745,6 @@ void core_link_enable_stream(
> >  dc_is_virtual_signal(pipe_ctx->stream->signal))
> >  return;
> >
> > -   if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
> > -   stream->link->link_enc->funcs->setup(
> > -   stream->link->link_enc,
> > -   pipe_ctx->stream->signal);
> > -   pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
> > -   pipe_ctx->stream_res.stream_enc,
> > -   pipe_ctx->stream_res.tg->inst,
> > -   stream->timing.timing_3d_format !=
> TIMING_3D_FORMAT_NONE);
> > -   }
> > -
> >  if (dc_is_dp_signal(pipe_ctx->stream->signal))
> >  pipe_ctx->stream_res.stream_enc->funcs-
> >dp_set_stream_attribute(
> >  pipe_ctx->stream_res.stream_enc,
> > @@ -2841,6 +2831,16 @@ void core_link_enable_stream(
> >  
> > CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
> >  COLOR_DEPTH_UNDEFINED);
> >
> > +   if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
> > +   stream->link->link_enc->funcs->setup(
> > +   stream->link->link_enc,
> > +   pipe_ctx->stream->signal);
> > +   pipe_ctx->stream_res.stream_enc->funcs-
> >setup_stereo_sync(
> > +   pipe_ctx->stream_res.stream_enc,
> > +   pipe_ctx->stream_res.tg->inst,
> > +   stream->timing.timing_3d_format !=
> TIMING_3D_FORMAT_NONE);
> > +   }
> > +
> >   #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
> >  if (pipe_ctx->stream->timing.flags.DSC) {
> >  if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
> > --
> > 2.17.1

Re: [PATCH] drm/amd/display: Modify display link stream setup sequence.

2019-10-17 Thread Kazlauskas, Nicholas
On 2019-10-17 12:28 a.m., Liu, Zhan wrote:
> From: Zhan Liu 
> 
> [Why]
> When a specific kind of connector is detected,
> DC needs to set the attribute of the stream.
> This step needs to be done before enabling link,
> or some bugs (e.g. display won't light up)
> will be observed.
> 
> [How]
> Setting the attribute of the stream first, then
> enabling stream.
> 
> Signed-off-by: Zhan Liu 

NAK:

1. It's difficult to understand what issue this change is attempting to 
solve and why it actually does it. Specifics would help here.

2. It affects a common code path for all ASICs which has been tested and 
known to be working correctly for those test cases.

3. The description is incorrect - the link enable/stream enable were 
both previously happening after the stream setup. What's changed in the 
patch is the link enable now happens before the link setup.

Both of these calls internally go through the command table to VBIOS so 
what behavior differences you're seeing may be caused by the input 
parameters to the ATOM_ENCODER_CMD_STREAM_SETUP or 
TRANSMITTER_CONTROL_ENABLE commands or the actual execution of those 
commands.

Nicholas Kazlauskas

> ---
>   drivers/gpu/drm/amd/display/dc/core/dc_link.c | 20 +--
>   1 file changed, 10 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
> b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> index fb18681b502b..713caab82837 100644
> --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> @@ -2745,16 +2745,6 @@ void core_link_enable_stream(
>  dc_is_virtual_signal(pipe_ctx->stream->signal))
>  return;
> 
> -   if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
> -   stream->link->link_enc->funcs->setup(
> -   stream->link->link_enc,
> -   pipe_ctx->stream->signal);
> -   pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
> -   pipe_ctx->stream_res.stream_enc,
> -   pipe_ctx->stream_res.tg->inst,
> -   stream->timing.timing_3d_format != 
> TIMING_3D_FORMAT_NONE);
> -   }
> -
>  if (dc_is_dp_signal(pipe_ctx->stream->signal))
>  
> pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
>  pipe_ctx->stream_res.stream_enc,
> @@ -2841,6 +2831,16 @@ void core_link_enable_stream(
>  CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
>  COLOR_DEPTH_UNDEFINED);
> 
> +   if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
> +   stream->link->link_enc->funcs->setup(
> +   stream->link->link_enc,
> +   pipe_ctx->stream->signal);
> +   
> pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
> +   pipe_ctx->stream_res.stream_enc,
> +   pipe_ctx->stream_res.tg->inst,
> +   stream->timing.timing_3d_format != 
> TIMING_3D_FORMAT_NONE);
> +   }
> +
>   #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
>  if (pipe_ctx->stream->timing.flags.DSC) {
>  if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
> --
> 2.17.1

Re: [PATCH] drm/amd/display: Avoid sending abnormal VSIF

2019-10-17 Thread Kazlauskas, Nicholas
On 2019-10-17 5:11 a.m., Wayne Lin wrote:
> [Why]
> While setting hdmi_vic, hv_frame.vic is not initialized and might
> assign a wrong value to hdmi_vic. Cause to send out VSIF with
> abnormal value.
> 
> [How]
> Initialize hv_frame and avi_frame
> 
> Signed-off-by: Wayne Lin 

Reviewed-by: Nicholas Kazlauskas 

Thanks!

> ---
>   drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++
>   1 file changed, 3 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 10cce584719f..33a455c90e27 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -3472,6 +3472,9 @@ static void 
> fill_stream_properties_from_drm_display_mode(
>   struct hdmi_vendor_infoframe hv_frame;
>   struct hdmi_avi_infoframe avi_frame;
>   
> + memset(_frame, 0, sizeof(hv_frame));
> + memset(_frame, 0, sizeof(avi_frame));
> +
>   timing_out->h_border_left = 0;
>   timing_out->h_border_right = 0;
>   timing_out->v_border_top = 0;
> 


Re: [PATCH 18/19] ALSA: hda/hdmi - enable runtime pm for newer AMD display audio

2019-10-17 Thread Alex Deucher
On Thu, Oct 17, 2019 at 3:51 AM Takashi Iwai  wrote:
>
> On Fri, 11 Oct 2019 03:45:35 +0200,
> Alex Deucher wrote:
> >
> > We are able to power down the GPU and audio via the GPU driver
> > so flag these asics as supporting runtime pm.
> >
> > Signed-off-by: Alex Deucher 
>
> Sorry for the late reply, as I've been off for the last few weeks.
>
> The change itself looks good to me, but maybe it'd be nicer to mention
> that you're adding new IDs, not only flipping the flag of the existing
> entries.

I'll split it into two patches.

>
> Also, you can define a new flag, e.g.
>
> #define AZX_DCAPS_PRESET_ATI_HDMI_PM \
> (AZX_DCAPS_PRESET_ATI_HDMI_NS | AZX_DCAPS_PM_RUNTIME)
>
> and use them instead of open-coding at each place.
> This is just a matter of taste, so I don't mind either way, though.
>

Thanks!

Alex

>
> thanks,
>
> Takashi
>
> > ---
> >  sound/pci/hda/hda_intel.c | 35 ++-
> >  1 file changed, 30 insertions(+), 5 deletions(-)
> >
> > diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
> > index 9b90312d7314..925a95927f20 100644
> > --- a/sound/pci/hda/hda_intel.c
> > +++ b/sound/pci/hda/hda_intel.c
> > @@ -2562,13 +2562,38 @@ static const struct pci_device_id azx_ids[] = {
> >   { PCI_DEVICE(0x1002, 0xaac8),
> > .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > },
> >   { PCI_DEVICE(0x1002, 0xaad8),
> > -   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > },
> > - { PCI_DEVICE(0x1002, 0xaae8),
> > -   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > },
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> >   { PCI_DEVICE(0x1002, 0xaae0),
> > -   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > },
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> > + { PCI_DEVICE(0x1002, 0xaae8),
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> >   { PCI_DEVICE(0x1002, 0xaaf0),
> > -   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > },
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> > + { PCI_DEVICE(0x1002, 0xaaf8),
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> > + { PCI_DEVICE(0x1002, 0xab00),
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> > + { PCI_DEVICE(0x1002, 0xab08),
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> > + { PCI_DEVICE(0x1002, 0xab10),
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> > + { PCI_DEVICE(0x1002, 0xab18),
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> > + { PCI_DEVICE(0x1002, 0xab20),
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> > + { PCI_DEVICE(0x1002, 0xab38),
> > +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS 
> > |
> > +   AZX_DCAPS_PM_RUNTIME },
> >   /* VIA VT8251/VT8237A */
> >   { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA },
> >   /* VIA GFX VT7122/VX900 */
> > --
> > 2.20.1
> >
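
Takashi's suggested cleanup folds the repeated flag pair into one preset so
every new ID stays a one-liner; applied to the entries above, it would look
roughly like this (a sketch of the suggestion, not the committed patch):

	#define AZX_DCAPS_PRESET_ATI_HDMI_PM \
		(AZX_DCAPS_PRESET_ATI_HDMI_NS | AZX_DCAPS_PM_RUNTIME)

	{ PCI_DEVICE(0x1002, 0xab20),
	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_PM },
	{ PCI_DEVICE(0x1002, 0xab38),
	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_PM },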

RE: [PATCH 1/3] drm/amdgpu: fix stack alignment ABI mismatch for Clang

2019-10-17 Thread S, Shirish
Tested-by: Shirish S  



Regards,
Shirish S

-Original Message-
From: Nick Desaulniers  
Sent: Thursday, October 17, 2019 4:32 AM
To: Wentland, Harry ; Deucher, Alexander 

Cc: yshu...@gmail.com; andrew.coop...@citrix.com; a...@arndb.de; 
clang-built-li...@googlegroups.com; m...@google.com; S, Shirish 
; Zhou, David(ChunMing) ; Koenig, 
Christian ; amd-gfx@lists.freedesktop.org; 
linux-ker...@vger.kernel.org; Nick Desaulniers 
Subject: [PATCH 1/3] drm/amdgpu: fix stack alignment ABI mismatch for Clang

The x86 kernel is compiled with an 8B stack alignment via 
`-mpreferred-stack-boundary=3` for GCC since 3.6-rc1 via commit d9b0cde91c60 
("x86-64, gcc: Use -mpreferred-stack-boundary=3 if supported") or 
`-mstack-alignment=8` for Clang. Parts of the AMDGPU driver are compiled with 
16B stack alignment.

Generally, the stack alignment is part of the ABI. Linking together two 
different translation units with differing stack alignment is dangerous, 
particularly when the translation unit with the smaller stack alignment makes 
calls into the translation unit with the larger stack alignment.
While 8B aligned stacks are sometimes also 16B aligned, they are not always.

Multiple users have reported General Protection Faults (GPF) when using the 
AMDGPU driver compiled with Clang. Clang is placing objects in stack slots 
assuming the stack is 16B aligned, and selecting instructions that require 16B 
aligned memory operands.

At runtime, syscall handlers with 8B aligned stack call into code that assumes 
16B stack alignment.  When the stack is a multiple of 8B but not 16B, these 
instructions result in a GPF.

Remove the code that added compatibility between the differing compiler flags, 
as it will result in runtime GPFs when built with Clang. Cleanups for GCC will 
be sent in later patches in the series.

Link: https://github.com/ClangBuiltLinux/linux/issues/735
Debugged-by: Yuxuan Shui 
Reported-by: Shirish S 
Reported-by: Yuxuan Shui 
Suggested-by: Andrew Cooper 
Signed-off-by: Nick Desaulniers 
---
 drivers/gpu/drm/amd/display/dc/calcs/Makefile | 10 --  
drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 10 --  
drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 10 --
 drivers/gpu/drm/amd/display/dc/dml/Makefile   | 10 --
 drivers/gpu/drm/amd/display/dc/dsc/Makefile   | 10 --
 5 files changed, 20 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile 
b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 985633c08a26..4b1a8a08a5de 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -24,13 +24,11 @@
 # It calculates Bandwidth and Watermarks values for HW programming  #
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-   cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-   cc_stack_align := -mstack-alignment=16
-endif
+calcs_ccflags := -mhard-float -msse
 
-calcs_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+calcs_ccflags += -mpreferred-stack-boundary=4
+endif
 
 ifdef CONFIG_CC_IS_CLANG
 calcs_ccflags += -msse2
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile 
b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index ddb8d5649e79..5fe3eb80075d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -10,13 +10,11 @@ ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 DCN20 += dcn20_dsc.o
 endif
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-   cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-   cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
 
-CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse 
$(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
+endif
 
 ifdef CONFIG_CC_IS_CLANG
 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2 diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile 
b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index ef673bffc241..7057e20748b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -3,13 +3,11 @@
 
 DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-   cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-   cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
 
-CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse 
$(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
+endif
 
 ifdef CONFIG_CC_IS_CLANG
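
The failure mode is easy to picture in miniature: code built with 16B stack
alignment may spill 16B-aligned locals with SSE aligned-move instructions
(movaps and friends), and if it is entered from code built with the
kernel's 8B alignment, the stack can arrive at 16n+8 and the aligned access
faults. An illustrative sketch (assumes x86-64 SSE codegen, not taken from
the driver):

	/* Built with -mpreferred-stack-boundary=4 (16B): the compiler may
	 * assume v is 16B aligned and emit movaps to spill/reload it. */
	void callee(void)
	{
		float v[4] __attribute__((aligned(16)));
		/* ... vector math on v ... */
	}

	/* Built with the kernel's -mpreferred-stack-boundary=3 (8B): this
	 * call can enter callee() with the stack only 8B aligned, so
	 * callee's movaps may take a general protection fault. */
	void caller(void)
	{
		callee();
	}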
 

[PATCH] drm/amd/display: Avoid sending abnormal VSIF

2019-10-17 Thread Wayne Lin
[Why]
While setting hdmi_vic, hv_frame.vic is not initialized and might
assign a wrong value to hdmi_vic, causing a VSIF with an abnormal
value to be sent out.

[How]
Initialize hv_frame and avi_frame

Signed-off-by: Wayne Lin 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 10cce584719f..33a455c90e27 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3472,6 +3472,9 @@ static void fill_stream_properties_from_drm_display_mode(
struct hdmi_vendor_infoframe hv_frame;
struct hdmi_avi_infoframe avi_frame;
 
+   memset(&hv_frame, 0, sizeof(hv_frame));
+   memset(&avi_frame, 0, sizeof(avi_frame));
+
timing_out->h_border_left = 0;
timing_out->h_border_right = 0;
timing_out->v_border_top = 0;
-- 
2.17.1
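For readers unfamiliar with the failure mode the [Why] above describes,
here is a minimal standalone sketch (hypothetical struct and field
names, not the real hdmi_vendor_infoframe API):

#include <stdio.h>
#include <string.h>

/* Stand-in for an infoframe: only some fields are written on a given
 * code path, the rest keep whatever was on the stack. */
struct fake_vendor_infoframe {
	unsigned char vic;       /* written only for 4k modes */
	unsigned char s3d_flags; /* never written on this path */
};

static void fill_frame(struct fake_vendor_infoframe *f, int is_4k_mode)
{
	memset(f, 0, sizeof(*f)); /* the fix: start from a known state */
	if (is_4k_mode)
		f->vic = 1;
	/* Without the memset, f->s3d_flags (and f->vic on non-4k modes)
	 * would be stack garbage and could be sent to the sink. */
}

int main(void)
{
	struct fake_vendor_infoframe frame;

	fill_frame(&frame, 0);
	printf("vic=%u s3d=%u\n", frame.vic, frame.s3d_flags);
	return 0;
}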

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/amdgpu/vi: silence an uninitialized variable warning

2019-10-17 Thread Dan Carpenter
Smatch complains that we need to initialize "*cap", otherwise it can
lead to an uninitialized variable bug in the caller.  This seems like a
reasonable warning and it doesn't hurt to silence it at least.

drivers/gpu/drm/amd/amdgpu/vi.c:767 vi_asic_reset_method() error: uninitialized symbol 'baco_reset'.

Fixes: 425db2553e43 ("drm/amdgpu: expose BACO interfaces to upper level from PP")
Signed-off-by: Dan Carpenter 
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c 
b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 83196b79edd5..f4ff15378e61 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1421,6 +1421,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
 {
struct pp_hwmgr *hwmgr = handle;
 
+   *cap = false;
if (!hwmgr)
return -EINVAL;
 
-- 
2.20.1
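To see why Smatch complains, here is a reduced sketch of the call chain
(names simplified from vi_asic_reset_method() and
pp_get_asic_baco_capability(); not the real driver code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Callee: without the "*cap = false;" line, the early return leaves
 * *cap untouched. */
int get_baco_capability(void *hwmgr, bool *cap)
{
	*cap = false;    /* the fix: define the output before any bail-out */
	if (!hwmgr)
		return -EINVAL;
	*cap = true;
	return 0;
}

/* Caller: mirrors vi_asic_reset_method(), which ignores the return
 * code and reads baco_reset unconditionally. */
bool asic_supports_baco(void *hwmgr)
{
	bool baco_reset; /* deliberately uninitialized, as in vi.c */

	get_baco_capability(hwmgr, &baco_reset);
	return baco_reset; /* well-defined only because the callee always writes it */
}

int main(void)
{
	printf("baco: %d\n", asic_supports_baco(NULL));
	return 0;
}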

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH hmm 00/15] Consolidate the mmu notifier interval_tree and locking

2019-10-17 Thread Christian König

Am 16.10.19 um 18:04 schrieb Jason Gunthorpe:

On Wed, Oct 16, 2019 at 10:58:02AM +0200, Christian König wrote:

Am 15.10.19 um 20:12 schrieb Jason Gunthorpe:

From: Jason Gunthorpe 

8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) are using a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others are
simple linear search lists.

Of the ones I checked they largely seem to have various kinds of races,
bugs and poor implementation. This is a result of the complexity in how
the notifier interacts with get_user_pages(). It is extremely difficult to
use it correctly.

Consolidate all of this code together into the core mmu_notifier and
provide a locking scheme similar to hmm_mirror that allows the user to
safely use get_user_pages() and reliably know if the page list still
matches the mm.

That sounds really good, but could you outline for a moment how that is
achieved?

It uses the same basic scheme as hmm and rdma odp, outlined in the
revisions to hmm.rst later on.

Basically,

  seq = mmu_range_read_begin(&mrn);

  // This is a speculative region
  .. get_user_pages()/hmm_range_fault() ..


How do we enforce that this get_user_pages()/hmm_range_fault() doesn't 
see outdated page table information?


In other words, how is the following race prevented:

CPU A                                CPU B
invalidate_range_start()
                                     mmu_range_read_begin()
                                     get_user_pages()/hmm_range_fault()
Updating the ptes
invalidate_range_end()


I mean get_user_pages() tries to circumvent this issue by grabbing a 
reference to the pages in question, but that isn't sufficient for the 
SVM use case.


That's the reason why we had this horrible solution with a r/w lock and 
a linked list of BOs in an interval tree.


Regards,
Christian.


  // Result cannot be dereferenced

  take_lock(driver->update);
  if (mmu_range_read_retry(&mrn, range.notifier_seq)) {
 // collision! The results are not correct
 goto again
  }

  // no collision, and now under lock. Now we can de-reference the pages/etc
  // program HW
  // Now the invalidate callback is responsible for synchronizing against changes
  unlock(driver->update)

Basically, anything that was using hmm_mirror correctly transitions
over fairly trivially, just with the modification to store a sequence
number to close that race described in the hmm commit.

For something like AMD gpu I expect it to transition to use dma_fence
from the notifier for coherency right before it unlocks driver->update.

Jason
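
Pulling the quoted fragments back together, the whole pattern described
here looks roughly like the sketch below (mmu_range_* follows the RFC
naming, later renamed mmu_interval_* on merge; driver_lock,
driver_get_user_pages(), driver_put_pages() and driver_program_hw() are
placeholder names, not real APIs). It also shows why the race in the
diagram above is closed: invalidate_range_start() bumps the sequence
number and the invalidate callback takes the same driver lock, so a
concurrent invalidation is either detected by the retry check or
excluded while the hardware is programmed.

int driver_populate_range(struct mmu_range_notifier *mrn)
{
	unsigned long seq;
	int ret;

again:
	seq = mmu_range_read_begin(mrn);

	/* Speculative region: a concurrent invalidation may make the
	 * result stale, so it must not be dereferenced yet. */
	ret = driver_get_user_pages();
	if (ret)
		return ret;

	mutex_lock(&driver_lock);
	if (mmu_range_read_retry(mrn, seq)) {
		/* Collision: an invalidation ran between begin() and
		 * here; the page list may no longer match the mm. */
		mutex_unlock(&driver_lock);
		driver_put_pages();
		goto again;
	}

	/* No collision, and the invalidate callback also takes
	 * driver_lock, so the mapping stays valid while the HW is
	 * programmed. */
	driver_program_hw();
	mutex_unlock(&driver_lock);
	return 0;
}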
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 17/19] ALSA: hda/hdmi - fix vgaswitcheroo detection for AMD

2019-10-17 Thread Takashi Iwai
On Fri, 11 Oct 2019 03:45:34 +0200,
Alex Deucher wrote:
> 
> Only enable the vga_switcheroo logic on systems with the
> ATPX ACPI method.  This logic is not needed for asics
> that are not part of a PX (PowerXpress)/HG (Hybrid Graphics)
> platform.
> 
> Signed-off-by: Alex Deucher 
> ---
>  sound/pci/hda/hda_intel.c | 39 +++
>  1 file changed, 39 insertions(+)
> 
> diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
> index 1e14d7270adf..9b90312d7314 100644
> --- a/sound/pci/hda/hda_intel.c
> +++ b/sound/pci/hda/hda_intel.c
> @@ -35,6 +35,9 @@
>  #include 
>  #include 
>  #include 
> +#ifdef SUPPORT_VGA_SWITCHEROO
> +#include 
> +#endif
>  
>  #ifdef CONFIG_X86
>  /* for snoop control */
> @@ -1345,6 +1348,26 @@ static int azx_dev_free(struct snd_device *device)
>  }
>  
>  #ifdef SUPPORT_VGA_SWITCHEROO
> +/* ATPX is in the integrated GPU's namespace */
> +static struct pci_dev *atpx_present(void)
> +{
> + struct pci_dev *pdev = NULL;
> + acpi_handle dhandle, atpx_handle;
> + acpi_status status;
> +
> + while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != 
> NULL) {
> + dhandle = ACPI_HANDLE(&pdev->dev);
> + if (!dhandle)
> + continue;
> +
> + status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
> + if (ACPI_FAILURE(status))
> + continue;
> + return pdev;
> + }
> + return NULL;
> +}
> +
>  /*
>   * Check of disabled HDMI controller by vga_switcheroo
>   */
> @@ -1356,6 +1379,22 @@ static struct pci_dev *get_bound_vga(struct pci_dev 
> *pci)
>   switch (pci->vendor) {
>   case PCI_VENDOR_ID_ATI:
>   case PCI_VENDOR_ID_AMD:
> + if (pci->devfn == 1) {
> + p = pci_get_domain_bus_and_slot(pci_domain_nr(pci->bus),
> + pci->bus->number, 0);
> + if (p) {
> + /* ATPX is in the integrated GPU's ACPI 
> namespace
> +  * rather than the dGPU's namespace. However,
> +  * the dGPU is the one who is involved in
> +  * vgaswitcheroo.
> +  */
> + if (((p->class >> 16) == 
> PCI_BASE_CLASS_DISPLAY) &&
> + atpx_present())
> + return p;

Won't this lead to an unbalanced refcount for the device returned
from atpx_present()?


thanks,

Takashi

> + pci_dev_put(p);
> + }
> + }
> + break;
>   case PCI_VENDOR_ID_NVIDIA:
>   if (pci->devfn == 1) {
>   p = pci_get_domain_bus_and_slot(pci_domain_nr(pci->bus),
> -- 
> 2.20.1
> 
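
One way to address the refcount concern, assuming callers only need a
yes/no answer as in the hunk above, is to drop the reference inside the
helper and return a plain bool (an untested sketch, not the patch
author's fix):

/* ATPX is in the integrated GPU's namespace; we only need to know
 * whether it exists, so drop the pci_get_class() reference before
 * returning. */
static bool atpx_present(void)
{
	struct pci_dev *pdev = NULL;
	acpi_handle dhandle, atpx_handle;
	acpi_status status;

	while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
		dhandle = ACPI_HANDLE(&pdev->dev);
		if (!dhandle)
			continue;

		status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
		if (ACPI_FAILURE(status))
			continue;

		pci_dev_put(pdev); /* balance the ref taken by pci_get_class() */
		return true;
	}
	return false;
}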
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 18/19] ALSA: hda/hdmi - enable runtime pm for newer AMD display audio

2019-10-17 Thread Takashi Iwai
On Fri, 11 Oct 2019 03:45:35 +0200,
Alex Deucher wrote:
> 
> We are able to power down the GPU and audio via the GPU driver
> so flag these asics as supporting runtime pm.
> 
> Signed-off-by: Alex Deucher 

Sorry for the late reply, as I've been off for the last few weeks.

The change itself looks good to me, but maybe it'd be nicer to mention
that you're adding new IDs, not only flipping the flag of the existing
entries.

Also, you can define a new flag, e.g.

#define AZX_DCAPS_PRESET_ATI_HDMI_PM \
(AZX_DCAPS_PRESET_ATI_HDMI_NS | AZX_DCAPS_PM_RUNTIME)

and use them instead of open-coding at each place.
This is just a matter of taste, so I don't mind either way, though.
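
Concretely, with such a define each new table entry would collapse back
to the old one-line shape, e.g. (an illustration of the suggestion, not
a tested patch):

	{ PCI_DEVICE(0x1002, 0xab38),
	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_PM },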


thanks,

Takashi

> ---
>  sound/pci/hda/hda_intel.c | 35 ++-
>  1 file changed, 30 insertions(+), 5 deletions(-)
> 
> diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
> index 9b90312d7314..925a95927f20 100644
> --- a/sound/pci/hda/hda_intel.c
> +++ b/sound/pci/hda/hda_intel.c
> @@ -2562,13 +2562,38 @@ static const struct pci_device_id azx_ids[] = {
>   { PCI_DEVICE(0x1002, 0xaac8),
> .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
>   { PCI_DEVICE(0x1002, 0xaad8),
> -   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
> - { PCI_DEVICE(0x1002, 0xaae8),
> -   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
>   { PCI_DEVICE(0x1002, 0xaae0),
> -   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
> + { PCI_DEVICE(0x1002, 0xaae8),
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
>   { PCI_DEVICE(0x1002, 0xaaf0),
> -   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
> + { PCI_DEVICE(0x1002, 0xaaf8),
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
> + { PCI_DEVICE(0x1002, 0xab00),
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
> + { PCI_DEVICE(0x1002, 0xab08),
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
> + { PCI_DEVICE(0x1002, 0xab10),
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
> + { PCI_DEVICE(0x1002, 0xab18),
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
> + { PCI_DEVICE(0x1002, 0xab20),
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
> + { PCI_DEVICE(0x1002, 0xab38),
> +   .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
> +   AZX_DCAPS_PM_RUNTIME },
>   /* VIA VT8251/VT8237A */
>   { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA },
>   /* VIA GFX VT7122/VX900 */
> -- 
> 2.20.1
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/amdgpu/display: fix compile error

2019-10-17 Thread Chen Wandun
From: Chenwandun 

drivers/gpu/drm/amd/amdgpu/../display/dc/dcn20/dcn20_resource.c:1913:48: error: struct dc_crtc_timing_flags has no member named DSC
   if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
drivers/gpu/drm/amd/amdgpu/../display/dc/dcn20/dcn20_resource.c:1914:73: error: struct dc_crtc_timing has no member named dsc_cfg
   pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
Signed-off-by: Chenwandun 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 914e378..4f03318 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1910,8 +1910,10 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
}
 
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
		pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
+#endif
 
		/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
-- 
2.7.4
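
The underlying issue generalizes: when struct members only exist under
a config option, every access needs the same guard. A reduced sketch
(hypothetical member layout, not the real dc_crtc_timing_flags
definition):

struct example_timing_flags {
	unsigned int INTERLACE:1;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	unsigned int DSC:1;   /* only exists when DSC support is built */
#endif
};

static int uses_dsc(const struct example_timing_flags *flags)
{
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	return flags->DSC;    /* guarded, matching the member above */
#else
	return 0;
#endif
}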