[PATCH 6/8] amdgpu/pm: Powerplay API for smu, changes to clock and profile mode functions

2020-12-18 Thread Darren Powell
New Functions
  smu_bump_power_profile_mode() - changes the profile mode, assuming the
calling function already holds the mutex
  smu_force_ppclk_levels()  - accepts the Powerplay enum pp_clock_type to
specify the clock to change
  smu_print_ppclk_levels()  - accepts the Powerplay enum pp_clock_type to
request clock levels
  amdgpu_get_pp_dpm_clock() - accepts the Powerplay enum pp_clock_type to
request clock levels, and allows all the
amdgpu_get_pp_dpm_$CLK functions to share a
single codepath
  amdgpu_set_pp_dpm_clock() - accepts the Powerplay enum pp_clock_type to
set clock levels, and allows all the
amdgpu_set_pp_dpm_$CLK functions to share a
single codepath

Modified Functions
  smu_force_smuclk_levels()  - renamed to make the distinction from
smu_force_ppclk_levels() clear
  smu_force_ppclk_levels()   - modified signature to implement the Powerplay
API force_clock_level
 - calls smu_force_smuclk_levels()
  smu_print_smuclk_levels()  - renamed to make the distinction from
smu_print_ppclk_levels() clear
  smu_print_ppclk_levels()   - modified signature to implement the Powerplay
API print_clock_levels
 - calls smu_print_smuclk_levels()
  smu_sys_get_gpu_metrics()  - modified arg0 to match the Powerplay API
get_gpu_metrics
  smu_get_power_profile_mode() - modified arg0 to match the Powerplay API
get_power_profile_mode
  smu_set_power_profile_mode() - modified arg0 to match the Powerplay API
set_power_profile_mode
 - removed the lock_needed arg; the mutex is
now always locked, and internal functions
that already hold it can call smu_bump
  smu_switch_power_profile()   - now calls smu_bump, as it already holds the
mutex lock
  smu_adjust_power_state_dynamic - now calls smu_bump, as it already holds
the mutex lock
  amdgpu_get_pp_od_clk_voltage   - uses smu_print_ppclk_levels()
  amdgpu_{set,get}_pp_dpm_$CLK   - replaced the per-clock logic with a call
to the helper functions
amdgpu_{set,get}_pp_dpm_clock()
   CLK = {sclk, mclk, socclk, fclk, dcefclk, pcie}

Other Changes
  added the 5 smu Powerplay functions above to swsmu_dpm_funcs
  removed special smu handling in the pm functions; calls now go through the
Powerplay API (a sketch of the consolidated clock helper follows)
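
For illustration, the shape of the consolidation (a standalone sketch of
the pattern, not the driver code verbatim; the enum values and the print
helper here are simplified stand-ins):

/* Sketch: the per-clock sysfs handlers collapse into one helper that is
 * parameterized by the clock type. Compiles standalone. */
#include <stdio.h>

enum pp_clock_type { PP_SCLK, PP_MCLK, PP_SOCCLK, PP_FCLK, PP_DCEFCLK, PP_PCIE };

/* stand-in for the real dispatch through pp_funcs->print_clock_levels */
static int print_clock_levels(enum pp_clock_type type, char *buf)
{
        return sprintf(buf, "clock %d: 0: 300Mhz 1: 600Mhz *\n", type);
}

/* the single codepath all amdgpu_get_pp_dpm_$CLK handlers now share */
static int get_pp_dpm_clock(enum pp_clock_type type, char *buf)
{
        return print_clock_levels(type, buf);
}

/* each per-clock handler becomes a thin wrapper */
static int get_pp_dpm_sclk(char *buf) { return get_pp_dpm_clock(PP_SCLK, buf); }
static int get_pp_dpm_mclk(char *buf) { return get_pp_dpm_clock(PP_MCLK, buf); }

int main(void)
{
        char buf[128];

        get_pp_dpm_sclk(buf);
        printf("%s", buf);
        get_pp_dpm_mclk(buf);
        printf("%s", buf);
        return 0;
}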

Signed-off-by: Darren Powell 
---
 drivers/gpu/drm/amd/pm/amdgpu_pm.c| 348 +++---
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h   |  15 +-
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c |  93 --
 3 files changed, 122 insertions(+), 334 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 41da5870af58..b84b14dc3eb9 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -879,10 +879,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device 
*dev,
}
 
if (is_support_sw_smu(adev)) {
-   size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
-   size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
-   size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, 
buf+size);
-   size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, 
buf+size);
+   size = smu_print_ppclk_levels(&adev->smu, OD_SCLK, buf);
+   size += smu_print_ppclk_levels(&adev->smu, OD_MCLK, buf+size);
+   size += smu_print_ppclk_levels(&adev->smu, OD_VDDC_CURVE, 
buf+size);
+   size += smu_print_ppclk_levels(&adev->smu, OD_RANGE, buf+size);
} else if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
@@ -1016,8 +1016,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
  * NOTE: change to the dcefclk max dpm level is not supported now
  */
 
-static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
-   struct device_attribute *attr,
+static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
+   enum pp_clock_type type,
char *buf)
 {
struct drm_device *ddev = dev_get_drvdata(dev);
@@ -1034,10 +1034,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
return ret;
}
 
-   if (is_support_sw_smu(adev))
-   size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
-   else if (adev->powerplay.pp_funcs->print_clock_levels)
-   size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+   if (adev->powerplay.pp_funcs->print_clock_levels)
+   size = amdgpu_dpm_print_clock_levels(adev, type, buf);
else
size = snprintf(buf, PAGE_SIZE, "\n");
 
@@ -1083,8 +1081,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t 
count, uint32_t *mask)
return 0;
 }
 
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
-  

[PATCH 7/8] amdgpu/pm: Powerplay API for smu, changed 4 dpm functions to use API

2020-12-18 Thread Darren Powell
New Functions
  smu_get_mclk() - implementation of the Powerplay API function get_mclk
  smu_get_sclk() - implementation of the Powerplay API function get_sclk
  smu_handle_dpm_task() - implementation of the Powerplay API function
dispatch_tasks

Modified Functions
  smu_dpm_set_power_gate() - modified arg0 to match the Powerplay API
set_powergating_by_smu

Other Changes
  removed special smu handling in the dpm functions; calls now go through the
Powerplay API (a sketch of smu_get_sclk() follows)
  calls to smu_dpm_set_power_gate() via the Powerplay API now lock the mutex
for UVD and VCE
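
A plausible shape for the new smu_get_sclk(), inferred from the caller-side
logic the diff below removes (a sketch with stubbed types, not the exact
driver code):

/* Sketch: the freq-range query that used to live in amdgpu_dpm_get_sclk()
 * moves behind the Powerplay get_sclk hook, taking a void *handle. */
#include <stdio.h>
#include <stdint.h>

struct smu_context { uint32_t min_mhz, max_mhz; };

/* stub standing in for the real smu_get_dpm_freq_range() */
static int smu_get_dpm_freq_range(struct smu_context *smu,
                                  uint32_t *min, uint32_t *max)
{
        if (min)
                *min = smu->min_mhz;
        if (max)
                *max = smu->max_mhz;
        return 0;
}

static int smu_get_sclk(void *handle, int low)
{
        struct smu_context *smu = handle;
        uint32_t clk_freq;

        if (smu_get_dpm_freq_range(smu, low ? &clk_freq : NULL,
                                   !low ? &clk_freq : NULL))
                return 0;
        return clk_freq * 100;   /* same unit conversion as the removed code */
}

int main(void)
{
        struct smu_context smu = { .min_mhz = 300, .max_mhz = 1800 };

        printf("low=%d high=%d\n", smu_get_sclk(&smu, 1), smu_get_sclk(&smu, 0));
        return 0;
}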

Signed-off-by: Darren Powell 
---
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c   | 94 ---
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h   |  7 +-
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 47 +++-
 3 files changed, 82 insertions(+), 66 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 8ae2df82addc..296879ba99c7 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -911,50 +911,28 @@ amdgpu_get_vce_clock_state(void *handle, u32 idx)
 
 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 {
-   uint32_t clk_freq;
-   int ret = 0;
-   if (is_support_sw_smu(adev)) {
-   ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
-low ? &clk_freq : NULL,
-!low ? &clk_freq : NULL);
-   if (ret)
-   return 0;
-   return clk_freq * 100;
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-   } else {
-   return 
(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
-   }
+   return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
 }
 
 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 {
-   uint32_t clk_freq;
-   int ret = 0;
-   if (is_support_sw_smu(adev)) {
-   ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
-low ? &clk_freq : NULL,
-!low ? &clk_freq : NULL);
-   if (ret)
-   return 0;
-   return clk_freq * 100;
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-   } else {
-   return 
(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
-   }
+   return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
 }
 
 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t 
block_type, bool gate)
 {
int ret = 0;
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
bool swsmu = is_support_sw_smu(adev);
 
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE:
-   if (swsmu) {
-   ret = smu_dpm_set_power_gate(&adev->smu, block_type, 
gate);
-   } else if (adev->powerplay.pp_funcs &&
-  adev->powerplay.pp_funcs->set_powergating_by_smu) {
+   if (pp_funcs && pp_funcs->set_powergating_by_smu) {
/*
 * TODO: need a better lock mechanism
 *
@@ -982,7 +960,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device 
*adev, uint32_t block
 * 
amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
 */
mutex_lock(&adev->pm.mutex);
-   ret = 
((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+   ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
mutex_unlock(&adev->pm.mutex);
}
@@ -990,12 +968,10 @@ int amdgpu_dpm_set_powergating_by_smu(struct 
amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_GFX:
case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
-   if (swsmu)
-   ret = smu_dpm_set_power_gate(&adev->smu, block_type, 
gate);
-   else if (adev->powerplay.pp_funcs &&
-adev->powerplay.pp_funcs->set_powergating_by_smu)
-   ret = 
((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+   if (pp_funcs && pp_funcs->set_powergating_by_smu) {
+   ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
+   }
break;
case AMD_IP_BLOCK_TYPE_JPEG:
if (swsmu)
@@ -1003,10 +979,10 @@ int amdgpu_dpm_set_powergating_by_smu(struct 
amdgpu_device *adev, uint32_t block
break;
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
-   

[PATCH 8/8] amdgpu/pm: Powerplay API for smu, updates to some pm functions

2020-12-18 Thread Darren Powell
Modified Functions
  smu_sys_set_pp_table()- modified signature to match the Powerplay API
set_pp_table
  smu_force_performance_level() - modified arg0 to match the Powerplay API
force_performance_level
  smu_od_edit_dpm_table()   - modified arg0 to match the Powerplay API
odn_edit_dpm_table

Other Changes
  smu_od_edit_dpm_table()   - removed the call to task(READJUST_POWER_STATE)
after COMMIT_TABLE; this is now handled in
the calling function
  amdgpu_set_power_dpm_force_performance_level() - now checks the thermal
state on swsmu systems before trying to change the level
  amdgpu_set_pp_od_clk_voltage() - now attempts to set fine_grain_clock_vol
before the swsmu edit of the dpm table

Signed-off-by: Darren Powell 
---
 drivers/gpu/drm/amd/pm/amdgpu_pm.c| 94 ---
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h   |  6 +-
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 17 ++--
 3 files changed, 46 insertions(+), 71 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b84b14dc3eb9..de89f7d895ee 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -369,14 +369,7 @@ static ssize_t 
amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return -EINVAL;
}
 
-   if (is_support_sw_smu(adev)) {
-   ret = smu_force_performance_level(&adev->smu, level);
-   if (ret) {
-   pm_runtime_mark_last_busy(ddev->dev);
-   pm_runtime_put_autosuspend(ddev->dev);
-   return -EINVAL;
-   }
-   } else if (pp_funcs->force_performance_level) {
+   if (pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
mutex_unlock(&adev->pm.mutex);
@@ -619,15 +612,12 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
return ret;
}
 
-   if (is_support_sw_smu(adev)) {
-   ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
-   if (ret) {
-   pm_runtime_mark_last_busy(ddev->dev);
-   pm_runtime_put_autosuspend(ddev->dev);
-   return ret;
-   }
-   } else if (adev->powerplay.pp_funcs->set_pp_table)
-   amdgpu_dpm_set_pp_table(adev, buf, count);
+   ret = amdgpu_dpm_set_pp_table(adev, buf, count);
+   if (ret) {
+   pm_runtime_mark_last_busy(ddev->dev);
+   pm_runtime_put_autosuspend(ddev->dev);
+   return ret;
+   }
 
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -807,53 +797,42 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device 
*dev,
return ret;
}
 
-   if (is_support_sw_smu(adev)) {
-   ret = smu_od_edit_dpm_table(&adev->smu, type,
-   parameter, parameter_size);
-
+   if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
+   ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
+   parameter,
+   parameter_size);
if (ret) {
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
-   } else {
-
-   if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
-   ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
-   parameter,
-   parameter_size);
-   if (ret) {
-   pm_runtime_mark_last_busy(ddev->dev);
-   pm_runtime_put_autosuspend(ddev->dev);
-   return -EINVAL;
-   }
-   }
+   }
 
-   if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
-   ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
-   parameter, parameter_size);
-   if (ret) {
-   pm_runtime_mark_last_busy(ddev->dev);
-   pm_runtime_put_autosuspend(ddev->dev);
-   return -EINVAL;
-   }
+   if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
+   ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
+   parameter, parameter_size);
+   if (ret) {
+   pm_runtime_mark_last_busy(ddev->dev);
+   pm_runtime_put_autosuspend(ddev->dev);
+   ret

[PATCH 5/8] amdgpu/pm: Powerplay API for smu, changed 5 dpm powergating & sensor functions to use API

2020-12-18 Thread Darren Powell
New Functions
  smu_get_baco_capability() - implements the Powerplay API
get_asic_baco_capability
  smu_baco_set_state()  - implements the Powerplay API set_asic_baco_state

Modified Functions
  smu_read_sensor() - modified signature to match the Powerplay API
read_sensor

Other Changes
  added the 3 smu Powerplay functions above to swsmu_dpm_funcs
  removed special smu handling in 5 dpm functions; calls now go through the
Powerplay API (a sketch of smu_baco_set_state() follows)
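
A sketch of how the new smu_baco_set_state() plausibly maps the Powerplay
state argument onto the existing enter/exit helpers (assumed shape; compare
the removed caller logic in the diff below, where state 1 enters BACO and
0 exits):

/* Sketch: set_asic_baco_state(handle, 1) enters BACO, 0 exits. Stubbed
 * so it compiles standalone. */
#include <stdio.h>

struct smu_context { int in_baco; };

static int smu_baco_enter(struct smu_context *smu) { smu->in_baco = 1; return 0; }
static int smu_baco_exit(struct smu_context *smu)  { smu->in_baco = 0; return 0; }

static int smu_baco_set_state(void *handle, int state)
{
        struct smu_context *smu = handle;

        return state ? smu_baco_enter(smu) : smu_baco_exit(smu);
}

int main(void)
{
        struct smu_context smu = { 0 };

        smu_baco_set_state(&smu, 1);   /* enter BACO */
        printf("in_baco=%d\n", smu.in_baco);
        smu_baco_set_state(&smu, 0);   /* exit BACO */
        printf("in_baco=%d\n", smu.in_baco);
        return 0;
}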

Signed-off-by: Darren Powell 
---
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c   | 86 ---
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h   |  7 +-
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 72 +--
 3 files changed, 100 insertions(+), 65 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index df5e7b573428..8ae2df82addc 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -1019,18 +1019,13 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
 {
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
-   struct smu_context *smu = &adev->smu;
int ret = 0;
 
-   if (is_support_sw_smu(adev)) {
-   ret = smu_baco_enter(smu);
-   } else {
-   if (!pp_funcs || !pp_funcs->set_asic_baco_state)
-   return -ENOENT;
+   if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+   return -ENOENT;
 
-   /* enter BACO state */
-   ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
-   }
+   /* enter BACO state */
+   ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 
return ret;
 }
@@ -1039,18 +1034,13 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
 {
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
-   struct smu_context *smu = &adev->smu;
int ret = 0;
 
-   if (is_support_sw_smu(adev)) {
-   ret = smu_baco_exit(smu);
-   } else {
-   if (!pp_funcs || !pp_funcs->set_asic_baco_state)
-   return -ENOENT;
+   if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+   return -ENOENT;
 
-   /* exit BACO state */
-   ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
-   }
+   /* exit BACO state */
+   ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
 
return ret;
 }
@@ -1074,20 +1064,15 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device 
*adev)
 {
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
-   struct smu_context *smu = &adev->smu;
bool baco_cap;
 
-   if (is_support_sw_smu(adev)) {
-   return smu_baco_is_support(smu);
-   } else {
-   if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
-   return false;
+   if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
+   return false;
 
-   if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
-   return false;
+   if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
+   return false;
 
-   return baco_cap ? true : false;
-   }
+   return baco_cap;
 }
 
 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
@@ -1105,32 +1090,20 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
 {
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
-   struct smu_context *smu = &adev->smu;
int ret = 0;
 
-   if (is_support_sw_smu(adev)) {
-   ret = smu_baco_enter(smu);
-   if (ret)
-   return ret;
-
-   ret = smu_baco_exit(smu);
-   if (ret)
-   return ret;
-   } else {
-   if (!pp_funcs
-   || !pp_funcs->set_asic_baco_state)
-   return -ENOENT;
+   if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+   return -ENOENT;
 
-   /* enter BACO state */
-   ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
-   if (ret)
-   return ret;
+   /* enter BACO state */
+   ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+   if (ret)
+   return ret;
 
-   /* exit BACO state */
-   ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
-   if (ret)
-   return ret;
-   }
+   /* exit BACO state */
+   ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+   if (ret)
+   return ret;
 
return 0;
 }
@@ -1272,20 +1245,17 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device 
*adev)
 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev

[PATCH 3/8] amdgpu/pm: Powerplay API for smu, changed 6 pm hwmon fan functions to use API

2020-12-18 Thread Darren Powell
Modified Functions
  smu_set_fan_speed_rpm() - modified arg0 to match the Powerplay API
set_fan_speed_rpm
  smu_get_fan_control_mode()  - modified signature to match the Powerplay
API get_fan_control_mode
  smu_set_fan_control_mode()  - modified signature to match the Powerplay
API set_fan_control_mode
  smu_get_fan_speed_percent() - modified signature to match the Powerplay
API get_fan_speed_percent
  smu_set_fan_speed_percent() - modified signature to match the Powerplay
API set_fan_speed_percent
  smu_get_fan_speed_rpm() - modified arg0 to match the Powerplay API
get_fan_speed_rpm

Other Changes
  added the 6 smu fan Powerplay functions above to swsmu_dpm_funcs
  removed special smu handling of the above functions; calls now go through
the Powerplay API

Signed-off-by: Darren Powell 
---
 drivers/gpu/drm/amd/pm/amdgpu_pm.c| 87 +++
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h   | 12 ++--
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 33 ++---
 3 files changed, 55 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 0008bbe971d6..b345c29147b9 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2349,18 +2349,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct 
device *dev,
return ret;
}
 
-   if (is_support_sw_smu(adev)) {
-   pwm_mode = smu_get_fan_control_mode(&adev->smu);
-   } else {
-   if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+   if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
-   }
-
-   pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}
 
+   pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
@@ -2389,18 +2385,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct 
device *dev,
return ret;
}
 
-   if (is_support_sw_smu(adev)) {
-   smu_set_fan_control_mode(&adev->smu, value);
-   } else {
-   if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
-   pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-   pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-   return -EINVAL;
-   }
-
-   amdgpu_dpm_set_fan_control_mode(adev, value);
+   if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+   pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+   pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+   return -EINVAL;
}
 
+   amdgpu_dpm_set_fan_control_mode(adev, value);
+
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
@@ -2439,11 +2431,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
return err;
}
 
-   if (is_support_sw_smu(adev))
-   pwm_mode = smu_get_fan_control_mode(&adev->smu);
-   else
-   pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
-
+   pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -2460,9 +2448,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
value = (value * 100) / 255;
 
-   if (is_support_sw_smu(adev))
-   err = smu_set_fan_speed_percent(&adev->smu, value);
-   else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
+   if (adev->powerplay.pp_funcs->set_fan_speed_percent)
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
else
err = -EINVAL;
@@ -2493,9 +2479,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return err;
}
 
-   if (is_support_sw_smu(adev))
-   err = smu_get_fan_speed_percent(&adev->smu, &speed);
-   else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
+   if (adev->powerplay.pp_funcs->get_fan_speed_percent)
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
else
err = -EINVAL;
@@ -2528,9 +2512,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device 
*dev,
return err;
}
 
-   if (is_support_sw_smu(adev))
-   err = smu_get_fan_speed_rpm(&adev->smu, &speed);
-   else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
+   if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
else
 

[PATCH 4/8] amdgpu/pm: Powerplay API for smu, changed 9 pm power functions to use API

2020-12-18 Thread Darren Powell
Modified Functions
  smu_get_power_limit() - modified arg0 to match the Powerplay API
get_power_limit
  smu_set_power_limit() - modified arg0 to match the Powerplay API
set_power_limit
  smu_sys_get_pp_table()- modified signature to match the Powerplay API
get_pp_table
  smu_get_power_num_states()- modified arg0 to match the Powerplay API
get_pp_num_states
  smu_get_current_power_state() - modified arg0 to match the Powerplay API
get_current_power_state
  smu_sys_get_pp_feature_mask() - modified signature to match the Powerplay
API get_ppfeature_status
  smu_sys_set_pp_feature_mask() - modified arg0 to match the Powerplay API
set_ppfeature_status

Other Changes
  added the 7 smu Powerplay functions above to swsmu_dpm_funcs
  removed special smu handling of the above functions; calls now go through
the Powerplay API

Signed-off-by: Darren Powell 
---
 drivers/gpu/drm/amd/pm/amdgpu_pm.c| 73 ---
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h   | 16 +++--
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 59 +++---
 3 files changed, 69 insertions(+), 79 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b345c29147b9..41da5870af58 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -124,6 +124,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device 
*dev,
 {
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_pm_state_type pm;
int ret;
 
@@ -136,12 +137,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device 
*dev,
return ret;
}
 
-   if (is_support_sw_smu(adev)) {
-   if (adev->smu.ppt_funcs->get_current_power_state)
-   pm = smu_get_current_power_state(&adev->smu);
-   else
-   pm = adev->pm.dpm.user_state;
-   } else if (adev->powerplay.pp_funcs->get_current_power_state) {
+   if (pp_funcs->get_current_power_state) {
pm = amdgpu_dpm_get_current_power_state(adev);
} else {
pm = adev->pm.dpm.user_state;
@@ -307,6 +303,7 @@ static ssize_t 
amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 {
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level level;
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
@@ -342,9 +339,7 @@ static ssize_t 
amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return ret;
}
 
-   if (is_support_sw_smu(adev))
-   current_level = smu_get_performance_level(&adev->smu);
-   else if (adev->powerplay.pp_funcs->get_performance_level)
+   if (pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
 
if (current_level == level) {
@@ -381,7 +376,7 @@ static ssize_t 
amdgpu_set_power_dpm_force_performance_level(struct device *dev,
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
-   } else if (adev->powerplay.pp_funcs->force_performance_level) {
+   } else if (pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
mutex_unlock(&adev->pm.mutex);
@@ -412,6 +407,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 {
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data;
int i, buf_len, ret;
 
@@ -424,12 +420,10 @@ static ssize_t amdgpu_get_pp_num_states(struct device 
*dev,
return ret;
}
 
-   if (is_support_sw_smu(adev)) {
-   ret = smu_get_power_num_states(&adev->smu, &data);
-   if (ret)
+   if (pp_funcs->get_pp_num_states) {
+   ret = amdgpu_dpm_get_pp_num_states(adev, &data);
+   if (is_support_sw_smu(adev) && ret)
return ret;
-   } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
-   amdgpu_dpm_get_pp_num_states(adev, &data);
} else {
memset(&data, 0, sizeof(data));
}
@@ -454,8 +448,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 {
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data;
-   struct smu_context *smu = &adev->smu;
enum amd_pm_state_type pm = 0;
  

[PATCH 2/8] amdgpu/pm: Powerplay API for smu, changed 6 dpm reset functions to use API

2020-12-18 Thread Darren Powell
Modified Functions
  smu_set_xgmi_pstate()   - modified arg0 to match the Powerplay API
set_xgmi_pstate
  smu_mode2_reset()   - modified arg0 to match the Powerplay API
asic_reset_mode_2
  smu_switch_power_profile()  - modified arg0 to match the Powerplay API
switch_power_profile
  smu_set_mp1_state() - modified arg0 to match the Powerplay API
set_mp1_state
  smu_set_df_cstate() - modified arg0 to match the Powerplay API
set_df_cstate
  smu_enable_mgpu_fan_boost() - modified arg0 to match the Powerplay API
enable_mgpu_fan_boost

Other Changes
  added the above smu reset Powerplay functions to swsmu_dpm_funcs
  removed special smu handling of the above functions; calls now go through
the Powerplay API

Signed-off-by: Darren Powell 
---
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c   | 46 +++
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h   | 12 +++---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 27 ++---
 3 files changed, 41 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 8fb12afe3c96..df5e7b573428 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -1059,12 +1059,10 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 enum pp_mp1_state mp1_state)
 {
int ret = 0;
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-   if (is_support_sw_smu(adev)) {
-   ret = smu_set_mp1_state(&adev->smu, mp1_state);
-   } else if (adev->powerplay.pp_funcs &&
-  adev->powerplay.pp_funcs->set_mp1_state) {
-   ret = adev->powerplay.pp_funcs->set_mp1_state(
+   if (pp_funcs && pp_funcs->set_mp1_state) {
+   ret = pp_funcs->set_mp1_state(
adev->powerplay.pp_handle,
mp1_state);
}
@@ -1096,16 +1094,11 @@ int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
 {
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
-   struct smu_context *smu = &adev->smu;
 
-   if (is_support_sw_smu(adev)) {
-   return smu_mode2_reset(smu);
-   } else {
-   if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
-   return -ENOENT;
+   if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
+   return -ENOENT;
 
-   return pp_funcs->asic_reset_mode_2(pp_handle);
-   }
+   return pp_funcs->asic_reset_mode_2(pp_handle);
 }
 
 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
@@ -1166,16 +1159,14 @@ int amdgpu_dpm_switch_power_profile(struct 
amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en)
 {
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
 
if (amdgpu_sriov_vf(adev))
return 0;
 
-   if (is_support_sw_smu(adev))
-   ret = smu_switch_power_profile(&adev->smu, type, en);
-   else if (adev->powerplay.pp_funcs &&
-adev->powerplay.pp_funcs->switch_power_profile)
-   ret = adev->powerplay.pp_funcs->switch_power_profile(
+   if (pp_funcs && pp_funcs->switch_power_profile)
+   ret = pp_funcs->switch_power_profile(
adev->powerplay.pp_handle, type, en);
 
return ret;
@@ -1184,13 +1175,11 @@ int amdgpu_dpm_switch_power_profile(struct 
amdgpu_device *adev,
 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
   uint32_t pstate)
 {
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
 
-   if (is_support_sw_smu(adev))
-   ret = smu_set_xgmi_pstate(&adev->smu, pstate);
-   else if (adev->powerplay.pp_funcs &&
-adev->powerplay.pp_funcs->set_xgmi_pstate)
-   ret = 
adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+   if (pp_funcs && pp_funcs->set_xgmi_pstate)
+   ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
pstate);
 
return ret;
@@ -1202,12 +1191,8 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
-   struct smu_context *smu = &adev->smu;
 
-   if (is_support_sw_smu(adev))
-   ret = smu_set_df_cstate(smu, cstate);
-   else if (pp_funcs &&
-pp_funcs->set_df_cstate)
+   if (pp_funcs && pp_funcs->set_df_cstate)
ret = pp_funcs->set_df_cstate(pp_handle, cstate);
 
return ret;
@@ -1228,12 +1213,9 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct 
amdgpu_device *adev)
void *pp_handle = adev->

[PATCH 1/8] amdgpu/pm: Powerplay API for smu, added get_performance_level

2020-12-18 Thread Darren Powell
Modified Functions
  smu_get_performance_level() - modified arg0 to match the Powerplay API
get_performance_level

Other Changes
  added a new structure, swsmu_dpm_funcs, to hold the smu functions for the
Powerplay API (a sketch of the dispatch pattern follows)
  removed special smu handling from
amdgpu_get_power_dpm_force_performance_level
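
The core idea of the series as a minimal standalone sketch (types are
simplified; in the driver the table is struct amd_pm_funcs and the
registration happens in smu_early_init(), as the diff below shows):

/* Sketch: the swsmu backend registers its functions in a Powerplay-style
 * function table, so callers dispatch through one pointer instead of
 * branching on is_support_sw_smu(). */
#include <stdio.h>

struct pm_funcs {
        int (*get_performance_level)(void *handle);
};

struct smu_context { int level; };

static int smu_get_performance_level(void *handle)
{
        struct smu_context *smu = handle;   /* arg0 is now an opaque handle */

        return smu->level;
}

static const struct pm_funcs swsmu_dpm_funcs = {
        .get_performance_level = smu_get_performance_level,
};

int main(void)
{
        struct smu_context smu = { .level = 2 };
        const struct pm_funcs *pp_funcs = &swsmu_dpm_funcs;  /* set at init */
        void *pp_handle = &smu;

        if (pp_funcs->get_performance_level)
                printf("level = %d\n",
                       pp_funcs->get_performance_level(pp_handle));
        return 0;
}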

Signed-off-by: Darren Powell 
---
 drivers/gpu/drm/amd/pm/amdgpu_pm.c|  4 +---
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h   |  2 +-
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 12 +++-
 3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 7b6ef05a1d35..0008bbe971d6 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -280,9 +280,7 @@ static ssize_t 
amdgpu_get_power_dpm_force_performance_level(struct device *dev,
return ret;
}
 
-   if (is_support_sw_smu(adev))
-   level = smu_get_performance_level(&adev->smu);
-   else if (adev->powerplay.pp_funcs->get_performance_level)
+   if (adev->powerplay.pp_funcs->get_performance_level)
level = amdgpu_dpm_get_performance_level(adev);
else
level = adev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 89be49a43500..10914f3438ac 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -743,7 +743,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum 
smu_clk_type clk_type,
   uint32_t *min, uint32_t *max);
 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type 
clk_type,
uint32_t min, uint32_t max);
-enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
+enum amd_dpm_forced_level smu_get_performance_level(void *handle);
 int smu_force_performance_level(struct smu_context *smu, enum 
amd_dpm_forced_level level);
 int smu_set_display_count(struct smu_context *smu, uint32_t count);
 int smu_set_ac_dc(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c 
b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index cf999b7a2164..36d18668ec99 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -46,6 +46,8 @@
 #undef pr_info
 #undef pr_debug
 
+static const struct amd_pm_funcs swsmu_dpm_funcs;
+
 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
 {
size_t size = 0;
@@ -428,6 +430,9 @@ static int smu_early_init(void *handle)
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
 
+   adev->powerplay.pp_handle = smu;
+   adev->powerplay.pp_funcs = &swsmu_dpm_funcs;
+
return smu_set_funcs(adev);
 }
 
@@ -1569,8 +1574,9 @@ int smu_switch_power_profile(struct smu_context *smu,
return 0;
 }
 
-enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
+enum amd_dpm_forced_level smu_get_performance_level(void *handle)
 {
+   struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
enum amd_dpm_forced_level level;
 
@@ -2549,3 +2555,7 @@ int smu_gfx_state_change_set(struct smu_context *smu, 
uint32_t state)
 
return ret;
 }
+
+static const struct amd_pm_funcs swsmu_dpm_funcs = {
+   .get_performance_level = smu_get_performance_level,
+};
-- 
2.25.1



[PATCH 0/8] amdgpu/pm: Powerplay API for smu

2020-12-18 Thread Darren Powell


=== Description ===
Patches to add the Powerplay API to smu and simplify dpm/pm calling code

=== Test System ===
* DESKTOP(AMD FX-8350 + NAVI10(731F/ca), BIOS: F2)
 + ISO(Ubuntu 20.04.1 LTS)
 + Kernel(5.9.0-rc5-custom-pmcallback-00865-gd3b9ec1a13c9)

=== Patch Summary ===
   linux: (git://people.freedesktop.org/~agd5f/linux) 
origin/amd-staging-drm-next @ d3b9ec1a13c9 
+ 05e1d87d1cb4 amdgpu/pm: Powerplay API for smu, added
get_performance_level
+ b9e2be067f01 amdgpu/pm: Powerplay API for smu, changed 6 dpm reset
functions to use API
+ 9add1d6485a4 amdgpu/pm: Powerplay API for smu, changed 6 pm hwmon fan
functions to use API
+ c02b35074f14 amdgpu/pm: Powerplay API for smu, changed 9 pm power
functions to use API
+ 483bd1207f2c amdgpu/pm: Powerplay API for smu, changed 5 dpm powergating
& sensor functions to use API
+ f91c1665e30e amdgpu/pm: Powerplay API for smu, changes to clock and
profile mode functions
+ d1400afa4785 amdgpu/pm: Powerplay API for smu, changed 4 dpm functions
to use API
+ 2f12f7afa690 amdgpu/pm: Powerplay API for smu, updates to some pm
functions

=== Tests ===
** 0001
 amdgpu_get_power_dpm_force_performance_level()<- 
/sys/class/drm/card0/device/power_dpm_force_performance_level
** 0002
 amdgpu_dpm_set_mp1_state  <- systemctl suspend
 amdgpu_dpm_mode2_reset<- untested: needs Arctic Islands 
Hardware (VEGAx, RAVEN)
 amdgpu_dpm_switch_power_profile   <- untested
 amdgpu_dpm_set_xgmi_pstate<- untested: 
amdgpu_xgmi_set_pstate():411 pstate switching disabled
 amdgpu_dpm_set_df_cstate  <- untested: needs ras enabled hardware
 amdgpu_dpm_enable_mgpu_fan_boost  <- untested: needs multi-gpu hardware
** 0003
 amdgpu_hwmon_get_pwm1_enable<- 
/sys/class/drm/card0/device/hwmon/hwmon?/pwm1_enable
 amdgpu_hwmon_set_pwm1_enable<- 
/sys/class/drm/card0/device/hwmon/hwmon?/pwm1_enable
 amdgpu_hwmon_set_pwm1   <- 
/sys/class/drm/card0/device/hwmon/hwmon?/pwm1
 amdgpu_hwmon_get_pwm1   <- 
/sys/class/drm/card0/device/hwmon/hwmon?/pwm1
 amdgpu_hwmon_get_fan1_input <- 
/sys/class/drm/card0/device/hwmon/hwmon?/fan1_input
 amdgpu_hwmon_get_fan1_target<- 
/sys/class/drm/card0/device/hwmon/hwmon?/fan1_target
 amdgpu_hwmon_set_fan1_target<- 
/sys/class/drm/card0/device/hwmon/hwmon?/fan1_target
 amdgpu_hwmon_get_fan1_enable<- 
/sys/class/drm/card0/device/hwmon/hwmon?/fan1_enable
 amdgpu_hwmon_set_fan1_enable<- 
/sys/class/drm/card0/device/hwmon/hwmon?/fan1_enable
** 0004
 amdgpu_get_power_dpm_state  <- 
/sys/class/drm/card0/device/power_dpm_state
 amdgpu_set_power_dpm_force_performance_level<- 
/sys/class/drm/card0/device/power_dpm_force_performance_level
 amdgpu_get_pp_num_states<- 
/sys/class/drm/card0/device/pp_num_states
 amdgpu_get_pp_cur_state <- 
/sys/class/drm/card0/device/pp_cur_state
 amdgpu_get_pp_table <- 
/sys/class/drm/card0/device/pp_table
 amdgpu_get_pp_features  <- 
/sys/class/drm/card0/device/pp_features
 amdgpu_hwmon_show_power_cap_max <- 
/sys/class/drm/card0/device/hwmon/hwmon?/power1_cap_max
 amdgpu_hwmon_show_power_cap <- 
/sys/class/drm/card0/device/hwmon/hwmon?/power1_cap
 amdgpu_hwmon_set_power_cap  <- 
/sys/class/drm/card0/device/hwmon/hwmon?/power1_cap
** 0005
 amdgpu_dpm_baco_enter  <- untested: called from runtime.pm 
 amdgpu_dpm_baco_exit   <- untested: called from runtime.pm 
 amdgpu_dpm_is_baco_supported   <- untested: needs other Hardware(cik, 
vi, soc15)
 amdgpu_dpm_baco_reset  <- untested: needs other Hardware(cik, 
vi, soc15)
** 0006
 amdgpu_get_pp_dpm_sclk  <- /sys/class/drm/card0/device/pp_dpm_sclk
 amdgpu_set_pp_dpm_sclk  <- /sys/class/drm/card0/device/pp_dpm_sclk
 amdgpu_get_pp_dpm_mclk  <- /sys/class/drm/card0/device/pp_dpm_mclk
 amdgpu_set_pp_dpm_mclk  <- /sys/class/drm/card0/device/pp_dpm_mclk
 amdgpu_get_pp_dpm_socclk<- 
/sys/class/drm/card0/device/pp_dpm_socclk
 amdgpu_set_pp_dpm_socclk<- 
/sys/class/drm/card0/device/pp_dpm_socclk
 amdgpu_get_pp_dpm_fclk  <- /sys/class/drm/card0/device/pp_dpm_fclk
 amdgpu_set_pp_dpm_fclk  <- /sys/class/drm/card0/device/pp_dpm_fclk
 amdgpu_get_pp_dpm_dcefclk   <- 
/sys/class/drm/card0/device/pp_dpm_dcefclk
 amdgpu_set_pp_dpm_dcefclk   <- 
/sys/class/drm/card0/device/pp_dpm_dcefclk
 amdgpu_get_pp_dpm_pcie  <- /sys/class/drm/card0/device/pp_dpm_pcie
 amdgpu_set_pp_dpm_pcie  <- /sys/class/drm/card0/device/pp_dpm_pcie
 amdgpu_get_pp_power_profile_mode<- 
/sys/class/drm/card0/

[PATCH 14/14] drm/amd/display: add getter routine to retrieve mpcc mux

2020-12-18 Thread Bindu Ramamurthy
From: Josip Pavic 

[Why & How]
Add function to identify which MPCC is providing input to a specified OPP
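
For context, a sketch of how a caller might use the new hook through the
mpc function table (simplified standalone model; the diff below adds the
real hook and its dcn10 implementation):

/* Sketch: query which MPCC feeds a given OPP via the optional
 * get_mpc_out_mux entry in the function table. */
#include <stdio.h>

struct mpc;

struct mpc_funcs {
        unsigned int (*get_mpc_out_mux)(struct mpc *mpc, int opp_id);
};

struct mpc { const struct mpc_funcs *funcs; };

/* stand-in for the MPC_OUT_MUX register read in mpc1_get_mpc_out_mux() */
static unsigned int fake_get_mpc_out_mux(struct mpc *mpc, int opp_id)
{
        return (unsigned int)opp_id;
}

static const struct mpc_funcs funcs = {
        .get_mpc_out_mux = fake_get_mpc_out_mux,
};

int main(void)
{
        struct mpc mpc = { .funcs = &funcs };
        int opp_id = 0;

        if (mpc.funcs->get_mpc_out_mux)   /* hook may be absent elsewhere */
                printf("OPP %d is fed by MPCC %u\n", opp_id,
                       mpc.funcs->get_mpc_out_mux(&mpc, opp_id));
        return 0;
}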

Signed-off-by: Josip Pavic 
Acked-by: Bindu Ramamurthy 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 12 
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h |  1 +
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c |  1 +
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c |  1 +
 drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h  |  4 
 5 files changed, 19 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 3fcd408e9103..a46cb20596fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -467,6 +467,17 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool 
lock)
REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
 }
 
+unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
+{
+   struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+   uint32_t val;
+
+   if (opp_id < MAX_OPP && REG(MUX[opp_id]))
+   REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
+
+   return val;
+}
+
 static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -483,6 +494,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
.set_denorm_clamp = NULL,
.set_output_csc = NULL,
.set_output_gamma = NULL,
+   .get_mpc_out_mux = mpc1_get_mpc_out_mux,
 };
 
 void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index 66a4719c22a0..dbfffc6383dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -200,4 +200,5 @@ void mpc1_read_mpcc_state(
 
 void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
 
+unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id);
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 99cc095dc33c..6a99fdd55e8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -556,6 +556,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.set_ocsc_default = mpc2_set_ocsc_default,
.set_output_gamma = mpc2_set_output_gamma,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+   .get_mpc_out_mux = mpc1_get_mpc_out_mux,
 };
 
 void dcn20_mpc_construct(struct dcn20_mpc *mpc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index d7d053fc6e91..3e6f76096119 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1428,6 +1428,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
.program_3dlut = mpc3_program_3dlut,
.release_rmu = mpcc3_release_rmu,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+   .get_mpc_out_mux = mpc1_get_mpc_out_mux,
 
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 879f502ae530..75c77ad9cbfe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -359,6 +359,10 @@ struct mpc_funcs {
 
int (*release_rmu)(struct mpc *mpc, int mpcc_id);
 
+   unsigned int (*get_mpc_out_mux)(
+   struct mpc *mpc,
+   int opp_id);
+
 };
 
 #endif
-- 
2.25.1



[PATCH 13/14] drm/amd/display: always program DPPDTO unless not safe to lower

2020-12-18 Thread Bindu Ramamurthy
From: Jake Wang 

[Why]
We defer clock updates until after the pipes have been programmed. In
some instances we use a DPPCLK that was previously marked "unused".
This results in a brief window of time where underflow could occur.

[How]
During prepare bandwidth, allow rn_update_clocks_update_dpp_dto to check
each instance and compare the previous clock to the new clock. If the
new clock is higher than the previous clock, program the DPPDTO.
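
The per-instance policy described above, as a standalone sketch (the
helper shape is assumed; the real logic lives in
rn_update_clocks_update_dpp_dto()):

/* Sketch: program a pipe's DPPDTO when its new DPPCLK is >= the previous
 * one, or when lowering is known to be safe -- so a pipe coming back
 * from "unused" never runs on a stale, too-low DTO. */
#include <stdbool.h>
#include <stdio.h>

#define PIPE_COUNT 4

static void program_dpp_dto(int inst, int khz)
{
        printf("pipe %d: DPPDTO programmed for %d kHz\n", inst, khz);
}

static void update_dpp_dto(const int prev_khz[], const int new_khz[],
                           bool safe_to_lower)
{
        for (int i = 0; i < PIPE_COUNT; i++) {
                /* raising (or keeping) is always safe; lowering waits
                 * until safe_to_lower */
                if (safe_to_lower || new_khz[i] >= prev_khz[i])
                        program_dpp_dto(i, new_khz[i]);
        }
}

int main(void)
{
        int prev[PIPE_COUNT] = {      0, 600000, 600000, 0 };
        int next[PIPE_COUNT] = { 600000, 300000, 600000, 0 };

        update_dpp_dto(prev, next, false);   /* prepare bandwidth */
        update_dpp_dto(prev, next, true);    /* after pipes programmed */
        return 0;
}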

Signed-off-by: Jake Wang 
Acked-by: Bindu Ramamurthy 
---
 .../gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 11 +--
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index bad30217c7b4..01b1853b7750 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -227,12 +227,11 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
rn_vbios_smu_set_dppclk(clk_mgr, 
clk_mgr_base->clks.dppclk_khz);
 
// always update dtos unless clock is lowered and not safe to 
lower
-   if (new_clocks->dppclk_khz >= 
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
-   rn_update_clocks_update_dpp_dto(
-   clk_mgr,
-   context,
-   clk_mgr_base->clks.actual_dppclk_khz,
-   safe_to_lower);
+   rn_update_clocks_update_dpp_dto(
+   clk_mgr,
+   context,
+   clk_mgr_base->clks.actual_dppclk_khz,
+   safe_to_lower);
}
 
if (update_dispclk &&
-- 
2.25.1



[PATCH 12/14] drm/amd/display: [FW Promotion] Release 0.0.47

2020-12-18 Thread Bindu Ramamurthy
From: Yongqiang Sun 

- restore lvtma_pwrseq_delay2 from the vbios integrated info table
- restore MVID/NVID after power up
- enable the timer wake-up mask when enabling the timer interrupt

Signed-off-by: Yongqiang Sun 
Acked-by: Bindu Ramamurthy 
---
 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index f512bda96917..249a076d6f69 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xa18e25995
+#define DMUB_FW_VERSION_GIT_HASH 0xf51b86a
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 46
+#define DMUB_FW_VERSION_REVISION 47
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
-- 
2.25.1



[PATCH 11/14] drm/amd/display: updated wm table for Renoir

2020-12-18 Thread Bindu Ramamurthy
From: Jake Wang 

[Why]
For certain timings, Renoir may underflow due to the sr exit latency
being too slow.

[How]
Updated the wm table for Renoir.

Signed-off-by: Jake Wang 
Acked-by: Bindu Ramamurthy 
---
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c| 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 9aa1b63bb161..bad30217c7b4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -731,32 +731,32 @@ static struct wm_table ddr4_wm_table_rn = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
-   .sr_exit_time_us = 9.09,
-   .sr_enter_plus_exit_time_us = 10.14,
+   .sr_exit_time_us = 11.90,
+   .sr_enter_plus_exit_time_us = 12.80,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
-   .sr_exit_time_us = 11.12,
-   .sr_enter_plus_exit_time_us = 12.48,
+   .sr_exit_time_us = 13.18,
+   .sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
-   .sr_exit_time_us = 11.12,
-   .sr_enter_plus_exit_time_us = 12.48,
+   .sr_exit_time_us = 13.18,
+   .sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
-   .sr_exit_time_us = 11.12,
-   .sr_enter_plus_exit_time_us = 12.48,
+   .sr_exit_time_us = 13.18,
+   .sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
}
-- 
2.25.1



[PATCH 09/14] drm/amd/display: Multi-display underflow observed

2020-12-18 Thread Bindu Ramamurthy
From: Aric Cyr 

[Why]
FP2 programming does not happen when topology changes occur with multiple
displays.

[How]
Ensure FP2 is programmed whenever global sync changes occur, but wait for
VACTIVE first to avoid underflow.

Signed-off-by: Aric Cyr 
Acked-by: Bindu Ramamurthy 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  | 20 ---
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c| 12 ---
 2 files changed, 9 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7339d9855ec8..58eb0d69873a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2625,26 +2625,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
 
-   if (update_type != UPDATE_TYPE_FAST) {
-   // If changing VTG FP2: wait until back in vactive to program 
FP2
-   // Need to ensure that pipe unlock happens soon after to 
minimize race condition
-   for (i = 0; i < dc->res_pool->pipe_count; i++) {
-   struct pipe_ctx *pipe_ctx = 
&context->res_ctx.pipe_ctx[i];
-
-   if (pipe_ctx->top_pipe || pipe_ctx->stream != stream)
-   continue;
-
-   if (!pipe_ctx->update_flags.bits.global_sync)
-   continue;
-
-   
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, 
CRTC_STATE_VBLANK);
-   
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, 
CRTC_STATE_VACTIVE);
-
-   pipe_ctx->stream_res.tg->funcs->set_vtg_params(
-   pipe_ctx->stream_res.tg, 
&pipe_ctx->stream->timing, true);
-   }
-   }
-
if ((update_type != UPDATE_TYPE_FAST) && 
dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, false);
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 31a477194d3b..cb822df21b7c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1586,7 +1586,10 @@ static void dcn20_program_pipe(
&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
hws->funcs.blank_pixel_data(dc, pipe_ctx, 
!pipe_ctx->plane_state->visible);
 
-   if (pipe_ctx->update_flags.bits.global_sync) {
+   /* Only update TG on top pipe */
+   if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+   && !pipe_ctx->prev_odm_pipe) {
+
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
pipe_ctx->pipe_dlg_param.vready_offset,
@@ -1594,8 +1597,11 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
 
+   
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, 
CRTC_STATE_VBLANK);
+   
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, 
CRTC_STATE_VACTIVE);
+
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
-   pipe_ctx->stream_res.tg, 
&pipe_ctx->stream->timing, false);
+   pipe_ctx->stream_res.tg, 
&pipe_ctx->stream->timing, true);
 
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
@@ -2570,4 +2576,4 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc,
 {

pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp,
 test_pattern,
color_space, color_depth, solid_color, width, height, 
offset);
-}
\ No newline at end of file
+}
-- 
2.25.1



[PATCH 10/14] drm/amd/display: Acquire DSC during split stream for ODM only if top_pipe

2020-12-18 Thread Bindu Ramamurthy
From: Sung Lee 

[WHY]
DSC should only be acquired per OPP. Therefore, DSC should only
be acquired for the top_pipe when ODM is enabled.
Not doing this check may lead to acquiring more DSCs than needed
when doing MPO + ODM Combine.

[HOW]
Only acquire DSC if the pipe is the top_pipe.

Signed-off-by: Sung Lee 
Acked-by: Bindu Ramamurthy 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index ff36db5edf6c..e04ecf0fc0db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1933,7 +1933,7 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->stream_res.opp = 
pool->opps[next_odm_pipe->pipe_idx];
else
next_odm_pipe->stream_res.opp = 
next_odm_pipe->top_pipe->stream_res.opp;
-   if (next_odm_pipe->stream->timing.flags.DSC == 1) {
+   if (next_odm_pipe->stream->timing.flags.DSC == 1 && 
!next_odm_pipe->top_pipe) {
dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, 
next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
-- 
2.25.1



[PATCH 06/14] drm/amd/display: change SMU response timeout to 2s.

2020-12-18 Thread Bindu Ramamurthy
From: Yongqiang Sun 

[Why]
Garbage is showing up during reboot tests.
Reason:
The SMU might handle a display driver msg deferred, and the driver will
send the next msg to the SMU after the 10ms timeout; once the SMU FW
handles the previous msg, the parameters have already changed to the
next one, which results in a wrong value being programmed.

[How]
Extend the timeout to 2s so the SMU will have enough time to handle the
driver msg.
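
The timeout is the product of the polling delay and the retry count; a
small standalone sketch of the arithmetic (the wait helper's internals
are assumed, not copied from the driver):

/* Sketch: poll-with-retries wait. Total timeout ~= delay_us * max_retries,
 * so 10us * 1000 = 10ms (old) and 10us * 200000 = 2s (new). */
#include <stdio.h>

static int read_response(void)
{
        return 1;   /* stand-in for reading the SMU response register */
}

static unsigned int wait_for_response(unsigned int delay_us,
                                      unsigned int max_retries)
{
        unsigned int res = 0;

        do {
                res = read_response();
                if (res)
                        break;
                /* udelay(delay_us) in the real driver */
        } while (max_retries--);

        return res;
}

int main(void)
{
        printf("old timeout: %u ms\n", 10u * 1000u / 1000u);
        printf("new timeout: %u ms\n", 10u * 200000u / 1000u);
        return wait_for_response(10, 200000) ? 0 : 1;
}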

Signed-off-by: Yongqiang Sun 
Acked-by: Bindu Ramamurthy 
---
 .../gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 11a7b583d561..7deeec9d1c7c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -99,7 +99,7 @@ int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal 
*clk_mgr, unsigned
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
 
-   result = rn_smu_wait_for_response(clk_mgr, 10, 1000);
+   result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
 
ASSERT(result == VBIOSSMC_Result_OK || result == 
VBIOSSMC_Result_UnknownCmd);
 
-- 
2.25.1



[PATCH 08/14] drm/amd/display: Remove unnecessary NULL check

2020-12-18 Thread Bindu Ramamurthy
From: Eryk Brol 

[Why]
new_crtc_state is already dereferenced earlier in the function

[How]
Remove the check

Signed-off-by: Eryk Brol 
Acked-by: Bindu Ramamurthy 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 245bd1284e5f..ff4776877e1f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9455,7 +9455,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
 
-   if (dm_old_crtc_state->dsc_force_changed && new_crtc_state)
+   if (dm_old_crtc_state->dsc_force_changed)
new_crtc_state->mode_changed = true;
}
 
-- 
2.25.1



[PATCH 05/14] drm/amd/display: gradually ramp ABM intensity

2020-12-18 Thread Bindu Ramamurthy
From: Rizvi 

[Why]
The driver needs to pass the backlight ramp start and ramp reduction
values so that the intensity can be ramped down appropriately.

[How]
Use the abm_parameters structure to get these values from the driver.
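
A sketch of the selection logic the diff below adds to
dmub_init_abm_config(): a driver override applies to every aggression
level, otherwise each level takes its defaults from the abm_parameters
table (types and values simplified):

/* Sketch: pick backlight ramp start/reduction per aggression level --
 * the driver override wins, else use the per-level table entry. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_AGGR_LEVEL 4

struct abm_params { unsigned short ramp_reduction, ramp_start; };

static const struct abm_params table[NUM_AGGR_LEVEL] = {
        {0, 0}, {0, 0}, {0, 0}, {0, 0},   /* placeholder values */
};

static void fill_config(unsigned short red[], unsigned short start[],
                        bool override, unsigned short o_red,
                        unsigned short o_start)
{
        for (int i = 0; i < NUM_AGGR_LEVEL; i++) {
                red[i]   = override ? o_red   : table[i].ramp_reduction;
                start[i] = override ? o_start : table[i].ramp_start;
        }
}

int main(void)
{
        unsigned short red[NUM_AGGR_LEVEL], start[NUM_AGGR_LEVEL];

        fill_config(red, start, true, 0x10, 0x20);   /* override case */
        printf("level 0: reduction=0x%x start=0x%x\n", red[0], start[0]);
        return 0;
}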

Signed-off-by: Rizvi 
Acked-by: Bindu Ramamurthy 
---
 .../amd/display/modules/power/power_helpers.c | 35 +--
 .../amd/display/modules/power/power_helpers.h |  1 +
 2 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c 
b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index cc983f662157..4fd8bce95d84 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -82,22 +82,24 @@ struct abm_parameters {
unsigned char deviation_gain;
unsigned char min_knee;
unsigned char max_knee;
+   unsigned short blRampReduction;
+   unsigned short blRampStart;
 };
 
 static const struct abm_parameters abm_settings_config0[abm_defines_max_level] 
= {
-//  min_red  max_red  bright_pos  dark_pos  brightness_gain  contrast  
deviation  min_knee  max_knee
-   {0xff,   0xbf,0x20,   0x00, 0xff,0x99, 
0xb3,  0x40, 0xe0},
-   {0xde,   0x85,0x20,   0x00, 0xff,0x90, 
0xa8,  0x40, 0xdf},
-   {0xb0,   0x50,0x20,   0x00, 0xc0,0x88, 
0x78,  0x70, 0xa0},
-   {0x82,   0x40,0x20,   0x00, 0x00,0xff, 
0xb3,  0x70, 0x70},
+//  min_red  max_red  bright_pos  dark_pos  bright_gain  contrast  dev   
min_knee  max_knee  blStart  blRed
+   {0xff,   0xbf,0x20,   0x00, 0xff,0x99, 0xb3, 0x40,  
   0xe0, 0x,  0x},
+   {0xde,   0x85,0x20,   0x00, 0xff,0x90, 0xa8, 0x40,  
   0xdf, 0x,  0x},
+   {0xb0,   0x50,0x20,   0x00, 0xc0,0x88, 0x78, 0x70,  
   0xa0, 0x,  0x},
+   {0x82,   0x40,0x20,   0x00, 0x00,0xff, 0xb3, 0x70,  
   0x70, 0x,  0x},
 };
 
 static const struct abm_parameters abm_settings_config1[abm_defines_max_level] 
= {
-//  min_red  max_red  bright_pos  dark_pos  brightness_gain  contrast  
deviation  min_knee  max_knee
-   {0xf0,   0xd9,0x20,   0x00, 0x00,0xff, 
0xb3,  0x70, 0x70},
-   {0xcd,   0xa5,0x20,   0x00, 0x00,0xff, 
0xb3,  0x70, 0x70},
-   {0x99,   0x65,0x20,   0x00, 0x00,0xff, 
0xb3,  0x70, 0x70},
-   {0x82,   0x4d,0x20,   0x00, 0x00,0xff, 
0xb3,  0x70, 0x70},
+//  min_red  max_red  bright_pos  dark_pos  bright_gain  contrast  dev   
min_knee  max_knee  blStart  blRed
+   {0xf0,   0xd9,0x20,   0x00, 0x00,0xff, 0xb3, 0x70,  
   0x70, 0x,  0x},
+   {0xcd,   0xa5,0x20,   0x00, 0x00,0xff, 0xb3, 0x70,  
   0x70, 0x,  0x},
+   {0x99,   0x65,0x20,   0x00, 0x00,0xff, 0xb3, 0x70,  
   0x70, 0x,  0x},
+   {0x82,   0x4d,0x20,   0x00, 0x00,0xff, 0xb3, 0x70,  
   0x70, 0x,  0x},
 };
 
 static const struct abm_parameters * const abm_settings[] = {
@@ -662,6 +664,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
 {
struct iram_table_v_2_2 ram_table;
struct abm_config_table config;
+   unsigned int set = params.set;
bool result = false;
uint32_t i, j = 0;
 
@@ -710,6 +713,18 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
config.max_knee[i] = ram_table.max_knee[i];
}
 
+	if (params.backlight_ramping_override) {
+		for (i = 0; i < NUM_AGGR_LEVEL; i++) {
+			config.blRampReduction[i] = params.backlight_ramping_reduction;
+			config.blRampStart[i] = params.backlight_ramping_start;
+		}
+	} else {
+		for (i = 0; i < NUM_AGGR_LEVEL; i++) {
+			config.blRampReduction[i] = abm_settings[set][i].blRampReduction;
+			config.blRampStart[i] = abm_settings[set][i].blRampStart;
+		}
+	}
+
config.min_abm_backlight = ram_table.min_abm_backlight;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index fa4728d88092..6f2eecce6baa 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -39,6 +39,7 @@ enum abm_defines {
 struct dmcu_iram_parameters {
unsigned int *backlight_lut_array;
unsigned int backlight_lut_array_size;
+	bool backlight_ramping_override;

[PATCH 03/14] drm/amd/display: Modify the hdcp device count check condition

2020-12-18 Thread Bindu Ramamurthy
From: Martin Tsai 

[why]
Some MST displays may not report the internal panel in DEVICE_COUNT,
which makes the check condition always fail.

[how]
Update the condition to use the reported device count + 1
(because the immediate repeater's internal panel is possibly
not included in DEVICE_COUNT).
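
A minimal illustration of the adjusted check (standalone sketch, not
driver code):

    static bool hdcp_device_count_ok(unsigned int reported_device_count,
                                     unsigned int tracked_hdcp_displays)
    {
            /* +1 allows for an internal panel the repeater may omit */
            return (1 + reported_device_count) >= tracked_hdcp_displays;
    }

E.g. an MST repeater driving two external displays plus its internal panel
may report DEVICE_COUNT = 2 while the driver tracks 3 displays: the old
check (2 >= 3) failed, the new one (1 + 2 >= 3) passes.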

Signed-off-by: Martin Tsai 
Acked-by: Bindu Ramamurthy 
---
 .../gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c| 8 ++--
 .../gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c| 7 +--
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index f244b72e74e0..73ca49f05bd3 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -128,8 +128,12 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 
 static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
 {
-	/* device count must be greater than or equal to tracked hdcp displays */
-	return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
+	/* Some MST display may choose to report the internal panel as an HDCP RX.
+	 * To update this condition with 1(because the immediate repeater's internal
+	 * panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp).
+	 * Device count must be greater than or equal to tracked hdcp displays.
+	 */
+	return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
 }
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 549c113abcf7..a0895a7efda2 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -207,8 +207,11 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 
 static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
 {
-	/* device count must be greater than or equal to tracked hdcp displays */
-	return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
+	/* Some MST display may choose to report the internal panel as an HDCP RX.   */
+	/* To update this condition with 1(because the immediate repeater's internal */
+	/* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). */
+	/* Device count must be greater than or equal to tracked hdcp displays.      */
+   return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
 }
-- 
2.25.1



[PATCH 07/14] drm/amd/display: Update RN/VGH active display count workaround

2020-12-18 Thread Bindu Ramamurthy
From: Michael Strauss 

[WHY]
Virtual signals were previously counted as a workaround for an S0i2 hang,
which is fixed on Renoir. Counting them blocks S0i3 diags testing.

[HOW]
Stop counting virtual signals, as the S0i2 hang is fixed on Renoir.

Signed-off-by: Michael Strauss 
Acked-by: Bindu Ramamurthy 
---
 .../gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c| 9 +
 .../gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c   | 9 +
 2 files changed, 2 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index d00b02553d62..9aa1b63bb161 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -75,15 +75,8 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
 
-   /*
-* Only notify active stream or virtual stream.
-* Need to notify virtual stream to work around
-* headless case. HPD does not fire when system is in
-* S0i2.
-*/
 		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
-		if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
-				link->link_enc->funcs->is_dig_enabled(link->link_enc))
+		if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
 
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 9a8e66bba9c0..991b9c5beaa3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -74,15 +74,8 @@ int vg_get_active_display_cnt_wa(
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
 
-   /*
-* Only notify active stream or virtual stream.
-* Need to notify virtual stream to work around
-* headless case. HPD does not fire when system is in
-* S0i2.
-*/
 		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
-		if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
-				link->link_enc->funcs->is_dig_enabled(link->link_enc))
+		if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
 
-- 
2.25.1



[PATCH 02/14] drm/amd/display: Interfaces for hubp blank and soft reset

2020-12-18 Thread Bindu Ramamurthy
From: Wesley Chalmers 

[WHY]
HUBP blanking sequence on DCN30 requires us to check if HUBP is in blank
and also toggle HUBP_DISABLE, which should instead be called
HUBP_SOFT_RESET for what it does in HW.
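
As a rough usage sketch (the polling loop and timeout are illustrative
assumptions, not the actual DCN30 sequence):

    static void hubp_blank_then_soft_reset(struct hubp *hubp)
    {
            int timeout = 100;

            /* wait until HUBP reports it is actually in blank */
            while (!hubp->funcs->hubp_in_blank(hubp) && timeout--)
                    udelay(10);

            /* pulse what HW calls HUBP_DISABLE, i.e. a soft reset */
            hubp->funcs->hubp_soft_reset(hubp, true);
            hubp->funcs->hubp_soft_reset(hubp, false);
    }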

Signed-off-by: Wesley Chalmers 
Acked-by: Bindu Ramamurthy 
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c  | 18 ++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h  |  4 
 .../gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c  |  2 ++
 .../gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c  |  2 ++
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h   |  2 ++
 5 files changed, 28 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 41679ad531c5..9e796dfeac20 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1241,6 +1241,22 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
 }
 
+bool hubp1_in_blank(struct hubp *hubp)
+{
+   uint32_t in_blank;
+   struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+   REG_GET(DCHUBP_CNTL, HUBP_IN_BLANK, &in_blank);
+   return in_blank ? true : false;
+}
+
+void hubp1_soft_reset(struct hubp *hubp, bool reset)
+{
+   struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+   REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0);
+}
+
 void hubp1_init(struct hubp *hubp)
 {
//do nothing
@@ -1272,6 +1288,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
 
.dmdata_set_attributes = NULL,
.dmdata_load = NULL,
+   .hubp_soft_reset = hubp1_soft_reset,
+   .hubp_in_blank = hubp1_in_blank,
 };
 
 /*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 780af5b3c16f..a9a6ed7f4f99 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -260,6 +260,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
+   HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
@@ -455,6 +456,7 @@
type HUBP_VTG_SEL;\
type HUBP_UNDERFLOW_STATUS;\
type HUBP_UNDERFLOW_CLEAR;\
+   type HUBP_IN_BLANK;\
type NUM_PIPES;\
type NUM_BANKS;\
type PIPE_INTERLEAVE;\
@@ -772,5 +774,7 @@ void hubp1_vready_workaround(struct hubp *hubp,
 
 void hubp1_init(struct hubp *hubp);
 void hubp1_read_state_common(struct hubp *hubp);
+bool hubp1_in_blank(struct hubp *hubp);
+void hubp1_soft_reset(struct hubp *hubp, bool reset);
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index b7e44e53a342..0df0da2e6a4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1595,6 +1595,8 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
.validate_dml_output = hubp2_validate_dml_output,
+   .hubp_in_blank = hubp1_in_blank,
+   .hubp_soft_reset = hubp1_soft_reset,
 };
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index af462fe4260d..88ffa9ff1ed1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -509,6 +509,8 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp3_init,
+   .hubp_in_blank = hubp1_in_blank,
+   .hubp_soft_reset = hubp1_soft_reset,
 };
 
 bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 315e3061c592..22f3f643ed1b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -188,6 +188,8 @@ struct hubp_funcs {
void (*set_unbounded_requesting)(
struct hubp *hubp,
bool enable);
+   bool (*hubp_in_blank)(struct hubp *hubp);
+   void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
 
 };
 
-- 
2.25.1



[PATCH 04/14] drm/amd/display: To modify the condition in indicating branch device

2020-12-18 Thread Bindu Ramamurthy
From: Martin Tsai 

[why]
The sink count change HPD_IRQ will be ignored if the branch device has only
DP DFP.

[how]
Remove the port type restriction.

Signed-off-by: Martin Tsai 
Acked-by: Bindu Ramamurthy 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 8 +---
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 6b11d4af54af..2fc12239b22c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3173,13 +3173,7 @@ static void get_active_converter_info(
}
 
/* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
-   if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
-   link->dpcd_caps.is_branch_dev = false;
-   }
-
-   else {
-   link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
-   }
+   link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
 
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:
-- 
2.25.1



[PATCH 01/14] drm/amd/display: handler not correctly checked at remove_irq_handler

2020-12-18 Thread Bindu Ramamurthy
From: Qingqing Zhuo 

[why]
handler is supposed to be passed in as a function pointer;
however, the entire struct amdgpu_dm_irq_handler_data
retrieved from the list is used to check for a match.

[how]
use the interrupt_handler within amdgpu_dm_irq_handler_data
for checking a match.

Signed-off-by: Qingqing Zhuo 
Acked-by: Bindu Ramamurthy 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 357778556b06..26ed70e5538a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -165,7 +165,10 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
 list);
 
-   if (ih == handler) {
+   if (handler == NULL)
+   continue;
+
+   if (ih == handler->handler) {
/* Found our handler. Remove it from the list. */
list_del(&handler->list);
handler_removed = true;
-- 
2.25.1



[PATCH 00/14] DC Patches December 21, 2020

2020-12-18 Thread Bindu Ramamurthy
This DC patchset brings improvements in multiple areas.
In summary, we highlight:

* Ramp down ABM intensity.
* Firmware version 0.0.47.
* Updated wm table for Renoir.
* Extended SMU response timeout; interfaces for hubp blank and soft reset.

Aric Cyr (1):
  drm/amd/display: Multi-display underflow observed

Eryk Brol (1):
  drm/amd/display: Remove unnecessary NULL check

Jake Wang (2):
  drm/amd/display: updated wm table for Renoir
  drm/amd/display: always program DPPDTO unless not safe to lower

Josip Pavic (1):
  drm/amd/display: add getter routine to retrieve mpcc mux

Martin Tsai (2):
  drm/amd/display: Modify the hdcp device count check condition
  drm/amd/display: To modify the condition in indicating branch device

Michael Strauss (1):
  drm/amd/display: Update RN/VGH active display count workaround

Qingqing Zhuo (1):
  drm/amd/display: handler not correctly checked at remove_irq_handler

Rizvi (1):
  drm/amd/display: gradually ramp ABM intensity

Sung Lee (1):
  drm/amd/display: Acquire DSC during split stream for ODM only if
top_pipe

Wesley Chalmers (1):
  drm/amd/display: Interfaces for hubp blank and soft reset

Yongqiang Sun (2):
  drm/amd/display: change SMU response timeout to 2s.
  drm/amd/display: [FW Promotion] Release 0.0.47

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  2 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c |  5 ++-
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 36 ---
 .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c   |  2 +-
 .../display/dc/clk_mgr/dcn301/vg_clk_mgr.c|  9 +
 drivers/gpu/drm/amd/display/dc/core/dc.c  | 20 ---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  |  8 +
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 18 ++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h |  4 +++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c  | 12 +++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h  |  1 +
 .../gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c |  2 ++
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c| 12 +--
 .../gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c  |  1 +
 .../drm/amd/display/dc/dcn20/dcn20_resource.c |  2 +-
 .../gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c |  2 ++
 .../gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c  |  1 +
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  |  2 ++
 drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h   |  4 +++
 .../gpu/drm/amd/display/dmub/inc/dmub_cmd.h   |  4 +--
 .../display/modules/hdcp/hdcp1_execution.c|  8 +++--
 .../display/modules/hdcp/hdcp2_execution.c|  7 ++--
 .../amd/display/modules/power/power_helpers.c | 35 --
 .../amd/display/modules/power/power_helpers.h |  1 +
 24 files changed, 118 insertions(+), 80 deletions(-)

-- 
2.25.1



[PATCH] drm/amd/display: Create and Destroy PSR resources for DCN302

2020-12-18 Thread Bhawanpreet Lakha
From: Joshua Aberback 

We need these to support PSR on DCN302

Signed-off-by: Joshua Aberback 
Signed-off-by: Bhawanpreet Lakha 
---
 .../gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 13 +
 1 file changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 808c4dcdb3ac..8d24cd5e484e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -53,6 +53,7 @@
 #include "dce/dce_i2c_hw.h"
 #include "dce/dce_panel_cntl.h"
 #include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
 
 #include "hw_sequencer_private.h"
 #include "reg_helper.h"
@@ -238,6 +239,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.enable_tri_buf = true,
+   .disable_psr = true,
 };
 
 enum dcn302_clk_src_array_id {
@@ -1213,6 +1215,9 @@ static void dcn302_resource_destruct(struct resource_pool *pool)
dce_abm_destroy(&pool->multiple_abms[i]);
}
 
+   if (pool->psr != NULL)
+   dmub_psr_destroy(&pool->psr);
+
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
 }
@@ -1469,6 +1474,14 @@ static bool dcn302_resource_construct(
}
pool->timing_generator_count = i;
 
+   /* PSR */
+   pool->psr = dmub_psr_create(ctx);
+   if (pool->psr == NULL) {
+   dm_error("DC: failed to create psr!\n");
+   BREAK_TO_DEBUGGER();
+   goto create_fail;
+   }
+
/* ABMs */
for (i = 0; i < pool->res_cap->num_timing_generator; i++) {
		pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask);
-- 
2.25.1



Re: [PATCH] drm/amdkfd: check both client id and src id in interrupt handlers

2020-12-18 Thread Alex Deucher
On Fri, Dec 18, 2020 at 4:54 PM Felix Kuehling  wrote:
>
> Am 2020-12-18 um 4:34 p.m. schrieb Alex Deucher:
> > We can have the same src ids for different client ids so make sure to
> > check both the client id and the source id when handling interrupts.
> >
> > Signed-off-by: Alex Deucher 
>
> Looks reasonable to me. Does this fix a real problem, e.g. KFD
> intercepting an interrupt meant for another client?

yeah, we are debugging an issue on another chip and ending up doing
the wrong thing here because the source ids are the same for a
different client id.  I guess another option would be to filter the
client ids sent to amdgpu_amdkfd_interrupt().  E.g.,

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index afbbec82a289..77a542c2c194 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -464,7 +464,20 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
}

/* Send it to amdkfd as well if it isn't already handled */
-   if (!handled)
+   if (!handled &&
+   (client_id == AMDGPU_IRQ_CLIENTID_LEGACY ||
+client_id == SOC15_IH_CLIENTID_GRBM_CP ||
+client_id == SOC15_IH_CLIENTID_SDMA0 ||
+client_id == SOC15_IH_CLIENTID_SDMA1 ||
+client_id == SOC15_IH_CLIENTID_SDMA2 ||
+client_id == SOC15_IH_CLIENTID_SDMA3 ||
+client_id == SOC15_IH_CLIENTID_SDMA4 ||
+client_id == SOC15_IH_CLIENTID_SDMA5 ||
+client_id == SOC15_IH_CLIENTID_SDMA6 ||
+client_id == SOC15_IH_CLIENTID_SDMA7 ||
+client_id == SOC15_IH_CLIENTID_VMC ||
+client_id == SOC15_IH_CLIENTID_VMC1 ||
+client_id == SOC15_IH_CLIENTID_UTCL2))
amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
 }


Alex


>
> Reviewed-by: Felix Kuehling 
>
> > ---
> >  .../gpu/drm/amd/amdkfd/kfd_int_process_v9.c   | 46 ++-
> >  1 file changed, 35 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
> > index 241bd6ff79f4..0ca0327a39e5 100644
> > --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
> > +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
> > @@ -44,6 +44,21 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
> >   client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
> >   pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
> >
> > + /* Only handle clients we care about */
> > + if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
> > + client_id != SOC15_IH_CLIENTID_SDMA0 &&
> > + client_id != SOC15_IH_CLIENTID_SDMA1 &&
> > + client_id != SOC15_IH_CLIENTID_SDMA2 &&
> > + client_id != SOC15_IH_CLIENTID_SDMA3 &&
> > + client_id != SOC15_IH_CLIENTID_SDMA4 &&
> > + client_id != SOC15_IH_CLIENTID_SDMA5 &&
> > + client_id != SOC15_IH_CLIENTID_SDMA6 &&
> > + client_id != SOC15_IH_CLIENTID_SDMA7 &&
> > + client_id != SOC15_IH_CLIENTID_VMC &&
> > + client_id != SOC15_IH_CLIENTID_VMC1 &&
> > + client_id != SOC15_IH_CLIENTID_UTCL2)
> > + return false;
> > +
> >   /* This is a known issue for gfx9. Under non HWS, pasid is not set
> >* in the interrupt payload, so we need to find out the pasid on our
> >* own.
> > @@ -96,17 +111,26 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
> >   vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
> >   context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
> >
> > - if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
> > - kfd_signal_event_interrupt(pasid, context_id, 32);
> > - else if (source_id == SOC15_INTSRC_SDMA_TRAP)
> > - kfd_signal_event_interrupt(pasid, context_id & 0xfff, 28);
> > - else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
> > - kfd_signal_event_interrupt(pasid, context_id & 0xff, 24);
> > - else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
> > - kfd_signal_hw_exception_event(pasid);
> > - else if (client_id == SOC15_IH_CLIENTID_VMC ||
> > - client_id == SOC15_IH_CLIENTID_VMC1 ||
> > -  client_id == SOC15_IH_CLIENTID_UTCL2) {
> > + if (client_id == SOC15_IH_CLIENTID_GRBM_CP) {
> > + if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
> > + kfd_signal_event_interrupt(pasid, context_id, 32);
> > + else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
> > +			kfd_signal_event_interrupt(pasid, context_id & 0xff, 24);
> > + else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
> > + kfd_signal_hw_exception_event(pasid);
> > + } else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
> > +client_id == SOC15_IH_CLIENTID_SDMA1 ||
> > +client_id == SOC15_IH_

Re: [PATCH] drm/amdkfd: check both client id and src id in interrupt handlers

2020-12-18 Thread Felix Kuehling
Am 2020-12-18 um 4:34 p.m. schrieb Alex Deucher:
> We can have the same src ids for different client ids so make sure to
> check both the client id and the source id when handling interrupts.
>
> Signed-off-by: Alex Deucher 

Looks reasonable to me. Does this fix a real problem, e.g. KFD
intercepting an interrupt meant for another client?

Reviewed-by: Felix Kuehling 

> ---
>  .../gpu/drm/amd/amdkfd/kfd_int_process_v9.c   | 46 ++-
>  1 file changed, 35 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
> index 241bd6ff79f4..0ca0327a39e5 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
> @@ -44,6 +44,21 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
>   client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
>   pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
>  
> + /* Only handle clients we care about */
> + if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
> + client_id != SOC15_IH_CLIENTID_SDMA0 &&
> + client_id != SOC15_IH_CLIENTID_SDMA1 &&
> + client_id != SOC15_IH_CLIENTID_SDMA2 &&
> + client_id != SOC15_IH_CLIENTID_SDMA3 &&
> + client_id != SOC15_IH_CLIENTID_SDMA4 &&
> + client_id != SOC15_IH_CLIENTID_SDMA5 &&
> + client_id != SOC15_IH_CLIENTID_SDMA6 &&
> + client_id != SOC15_IH_CLIENTID_SDMA7 &&
> + client_id != SOC15_IH_CLIENTID_VMC &&
> + client_id != SOC15_IH_CLIENTID_VMC1 &&
> + client_id != SOC15_IH_CLIENTID_UTCL2)
> + return false;
> +
>   /* This is a known issue for gfx9. Under non HWS, pasid is not set
>* in the interrupt payload, so we need to find out the pasid on our
>* own.
> @@ -96,17 +111,26 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
>   vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
>   context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
>  
> - if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
> - kfd_signal_event_interrupt(pasid, context_id, 32);
> - else if (source_id == SOC15_INTSRC_SDMA_TRAP)
> - kfd_signal_event_interrupt(pasid, context_id & 0xfff, 28);
> - else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
> - kfd_signal_event_interrupt(pasid, context_id & 0xff, 24);
> - else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
> - kfd_signal_hw_exception_event(pasid);
> - else if (client_id == SOC15_IH_CLIENTID_VMC ||
> - client_id == SOC15_IH_CLIENTID_VMC1 ||
> -  client_id == SOC15_IH_CLIENTID_UTCL2) {
> + if (client_id == SOC15_IH_CLIENTID_GRBM_CP) {
> + if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
> + kfd_signal_event_interrupt(pasid, context_id, 32);
> + else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
> +			kfd_signal_event_interrupt(pasid, context_id & 0xff, 24);
> + else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
> + kfd_signal_hw_exception_event(pasid);
> + } else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
> +client_id == SOC15_IH_CLIENTID_SDMA1 ||
> +client_id == SOC15_IH_CLIENTID_SDMA2 ||
> +client_id == SOC15_IH_CLIENTID_SDMA3 ||
> +client_id == SOC15_IH_CLIENTID_SDMA4 ||
> +client_id == SOC15_IH_CLIENTID_SDMA5 ||
> +client_id == SOC15_IH_CLIENTID_SDMA6 ||
> +client_id == SOC15_IH_CLIENTID_SDMA7) {
> + if (source_id == SOC15_INTSRC_SDMA_TRAP)
> +			kfd_signal_event_interrupt(pasid, context_id & 0xfff, 28);
> + } else if (client_id == SOC15_IH_CLIENTID_VMC ||
> +client_id == SOC15_IH_CLIENTID_VMC1 ||
> +client_id == SOC15_IH_CLIENTID_UTCL2) {
>   struct kfd_vm_fault_info info = {0};
>   uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
>  


[PATCH] drm/amdkfd: check both client id and src id in interrupt handlers

2020-12-18 Thread Alex Deucher
We can have the same src ids for different client ids so make sure to
check both the client id and the source id when handling interrupts.

Signed-off-by: Alex Deucher 
---
 .../gpu/drm/amd/amdkfd/kfd_int_process_v9.c   | 46 ++-
 1 file changed, 35 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 241bd6ff79f4..0ca0327a39e5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -44,6 +44,21 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
 
+   /* Only handle clients we care about */
+   if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
+   client_id != SOC15_IH_CLIENTID_SDMA0 &&
+   client_id != SOC15_IH_CLIENTID_SDMA1 &&
+   client_id != SOC15_IH_CLIENTID_SDMA2 &&
+   client_id != SOC15_IH_CLIENTID_SDMA3 &&
+   client_id != SOC15_IH_CLIENTID_SDMA4 &&
+   client_id != SOC15_IH_CLIENTID_SDMA5 &&
+   client_id != SOC15_IH_CLIENTID_SDMA6 &&
+   client_id != SOC15_IH_CLIENTID_SDMA7 &&
+   client_id != SOC15_IH_CLIENTID_VMC &&
+   client_id != SOC15_IH_CLIENTID_VMC1 &&
+   client_id != SOC15_IH_CLIENTID_UTCL2)
+   return false;
+
/* This is a known issue for gfx9. Under non HWS, pasid is not set
 * in the interrupt payload, so we need to find out the pasid on our
 * own.
@@ -96,17 +111,26 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
 
-   if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
-   kfd_signal_event_interrupt(pasid, context_id, 32);
-   else if (source_id == SOC15_INTSRC_SDMA_TRAP)
-   kfd_signal_event_interrupt(pasid, context_id & 0xfff, 28);
-   else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
-   kfd_signal_event_interrupt(pasid, context_id & 0xff, 24);
-   else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
-   kfd_signal_hw_exception_event(pasid);
-   else if (client_id == SOC15_IH_CLIENTID_VMC ||
-   client_id == SOC15_IH_CLIENTID_VMC1 ||
-client_id == SOC15_IH_CLIENTID_UTCL2) {
+   if (client_id == SOC15_IH_CLIENTID_GRBM_CP) {
+   if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
+   kfd_signal_event_interrupt(pasid, context_id, 32);
+   else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
+			kfd_signal_event_interrupt(pasid, context_id & 0xff, 24);
+   else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+   kfd_signal_hw_exception_event(pasid);
+   } else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
+  client_id == SOC15_IH_CLIENTID_SDMA1 ||
+  client_id == SOC15_IH_CLIENTID_SDMA2 ||
+  client_id == SOC15_IH_CLIENTID_SDMA3 ||
+  client_id == SOC15_IH_CLIENTID_SDMA4 ||
+  client_id == SOC15_IH_CLIENTID_SDMA5 ||
+  client_id == SOC15_IH_CLIENTID_SDMA6 ||
+  client_id == SOC15_IH_CLIENTID_SDMA7) {
+   if (source_id == SOC15_INTSRC_SDMA_TRAP)
+			kfd_signal_event_interrupt(pasid, context_id & 0xfff, 28);
+   } else if (client_id == SOC15_IH_CLIENTID_VMC ||
+  client_id == SOC15_IH_CLIENTID_VMC1 ||
+  client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
 
-- 
2.25.4



Re: [PATCH] drm/amdgpu: fix handling of irq domains on soc15 and newer GPUs

2020-12-18 Thread Christian König

Am 18.12.20 um 17:53 schrieb Alex Deucher:

We need to take into account the client id otherwise we'll end
up sending generic events for any src id that is registered.

We only support irq domains on pre-soc15 parts so client is
always legacy.


I've seen that multiple times as well but always forgot to ask if that's 
right or wrong.




Signed-off-by: Alex Deucher 


Reviewed-by: Christian König 


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 3 ++-
  1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index bea57e8e793f..afbbec82a289 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -444,7 +444,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
} else  if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
  
-	} else if (adev->irq.virq[src_id]) {
+	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
+		   adev->irq.virq[src_id]) {
 		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
 
 	} else if (!adev->irq.client[client_id].sources) {




[PATCH] drm/amdgpu: fix handling of irq domains on soc15 and newer GPUs

2020-12-18 Thread Alex Deucher
We need to take into account the client id otherwise we'll end
up sending generic events for any src id that is registered.

We only support irq domains on pre-soc15 parts so client is
always legacy.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index bea57e8e793f..afbbec82a289 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -444,7 +444,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
} else  if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
 
-   } else if (adev->irq.virq[src_id]) {
+   } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
+  adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
 
} else if (!adev->irq.client[client_id].sources) {
-- 
2.25.4



Re: [PATCH 2/2] drm/amd/display: enable idle optimizations for linux (MALL stutter)

2020-12-18 Thread Lakha, Bhawanpreet
[AMD Public Use]

Hi Kenneth,

The patches currently have the Kconfig so make sure to enable that first.

And dc_allow_idle_optimizations() will be called when all the vblank irqs are 
off. On Ubuntu, this will happen after the desktop is idle for a few seconds.

Bhawan

From: Feng, Kenneth 
Sent: December 18, 2020 12:46 AM
To: Lakha, Bhawanpreet ; Deucher, Alexander 
; Kazlauskas, Nicholas 
Cc: amd-gfx@lists.freedesktop.org 
Subject: RE: [PATCH 2/2] drm/amd/display: enable idle optimizations for linux 
(MALL stutter)


[AMD Public Use]

Hi Bhawanpreet,

With the two patches, I still can’t see that dc_allow_idle_optimizations(struct dc *dc, bool allow) is called with allow = true after modprobe and startx.

Is there anything else missing?



Best Regards

Kenneth



From: amd-gfx  On Behalf Of Lakha, Bhawanpreet
Sent: Friday, December 18, 2020 1:42 AM
To: Deucher, Alexander ; Kazlauskas, Nicholas 
Cc: amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 2/2] drm/amd/display: enable idle optimizations for linux (MALL stutter)



[AMD Public Use]

Actually, I will drop the guards and if we see issues related to this, we can 
block it.



Bhawan



From: Lakha, Bhawanpreet <bhawanpreet.la...@amd.com>
Sent: December 17, 2020 12:32 PM
To: Deucher, Alexander <alexander.deuc...@amd.com>; Kazlauskas, Nicholas <nicholas.kazlaus...@amd.com>
Cc: amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 2/2] drm/amd/display: enable idle optimizations for linux (MALL stutter)



I would but MALL is not fully validated so it might cause underflow issues if 
we keep it enabled by default. I can create a feature flag mask if that helps?



Bhawan



From: Deucher, Alexander <alexander.deuc...@amd.com>
Sent: December 17, 2020 12:04 PM
To: Lakha, Bhawanpreet <bhawanpreet.la...@amd.com>; Kazlauskas, Nicholas <nicholas.kazlaus...@amd.com>
Cc: amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 2/2] drm/amd/display: enable idle optimizations for linux (MALL stutter)



[AMD Public Use]



Can we drop the Kconfig?  With that, the series is:

Acked-by: Alex Deucher <alexander.deuc...@amd.com>



From: Bhawanpreet Lakha <bhawanpreet.la...@amd.com>
Sent: Thursday, December 17, 2020 11:54 AM
To: Kazlauskas, Nicholas <nicholas.kazlaus...@amd.com>; Deucher, Alexander <alexander.deuc...@amd.com>
Cc: amd-gfx@lists.freedesktop.org; Lakha, Bhawanpreet <bhawanpreet.la...@amd.com>; Kazlauskas, Nicholas <nicholas.kazlaus...@amd.com>
Subject: [PATCH 2/2] drm/amd/display: enable idle optimizations for linux (MALL stutter)



[Why]
We can only use this feature when the displays are idle. When the active
vblank irq count is 0 we know all the displays are idle.

[How]
- Add an active vblank irq counter
- Update the counter when we enable/disable a vblank irq
- If the vblank irq count is 0 we can consider MALL stutter, as sketched below
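
In sketch form (simplified from the diff below; locking and DC checks
omitted):

    static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
    {
            struct amdgpu_device *adev = drm_to_adev(crtc->dev);
            struct amdgpu_display_manager *dm = &adev->dm;

            if (enable)
                    dm->active_vblank_irq_count++;
            else
                    dm->active_vblank_irq_count--;

            /* MALL stutter is only safe when no CRTC still needs vblank */
            dc_allow_idle_optimizations(dm->dc,
                                        dm->active_vblank_irq_count == 0);
            return 0;
    }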

Change-Id: Ib1e14a84ee2e8c6e057072128693449665012584
Signed-off-by: Bhawanpreet Lakha <bhawanpreet.la...@amd.com>
Acked-by: Alex Deucher <alexander.deuc...@amd.com>
Reviewed-by: Nick Kazlauskas <nicholas.kazlaus...@amd.com>
---
 drivers/gpu/drm/amd/display/Kconfig   |  6 +
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 22 +++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  9 
 drivers/gpu/drm/amd/display/dc/dc.h   |  3 +++
 4 files changed, 40 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 797b5d4b43e5..2444e664c7ee 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -23,6 +23,12 @@ config DRM_AMD_DC_HDCP
 help
   Choose this option if you want to support HDCP authentication.

+config DRM_AMD_DC_MALL
+   bool "Enable MALL support"
+   depends on DRM_AMD_DC
+   help
+ Choose this option if you want to support MALL
+
 config DRM_AMD_DC_SI
 bool "AMD DC support for Southern Islands ASICs"
 default n
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a78ec16418b3..080f2a52cfed 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5479,6 +5479,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 struct dm_crtc_state *acrtc_state =

Re: [PATCH] drm/amdgpu: Fix a copy-pasta comment

2020-12-18 Thread Nirmoy

Reviewed-by: Nirmoy Das 

On 12/18/20 5:20 PM, Alex Deucher wrote:

This is not a scsi driver.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +--
  1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f194fafa619d..e46646679281 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5070,8 +5070,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
   * @pdev: pointer to PCI device
   *
   * Called when the error recovery driver tells us that its
- * OK to resume normal operation. Use completion to allow
- * halted scsi ops to resume.
+ * OK to resume normal operation.
   */
  void amdgpu_pci_resume(struct pci_dev *pdev)
  {



[PATCH] drm/amdgpu: Fix a copy-pasta comment

2020-12-18 Thread Alex Deucher
This is not a scsi driver.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f194fafa619d..e46646679281 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5070,8 +5070,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
  * @pdev: pointer to PCI device
  *
  * Called when the error recovery driver tells us that its
- * OK to resume normal operation. Use completion to allow
- * halted scsi ops to resume.
+ * OK to resume normal operation.
  */
 void amdgpu_pci_resume(struct pci_dev *pdev)
 {
-- 
2.25.4



[PATCH] drm/amdgpu: only set DP subconnector type on DP and eDP connectors

2020-12-18 Thread Alex Deucher
Fixes a crash in drm_object_property_set_value() because the property
is not set for internal DP ports that connect to a bridge chips
(e.g., DP to VGA or DP to LVDS).

Bug: https://bugzilla.kernel.org/show_bug.cgi?id=210739
Fixes: 65bf2cf95d3ade ("drm/amdgpu: utilize subconnector property for DP through atombios")
Tested-By: Kris Karas 
Cc: Oleg Vasilev 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 65d1b23d7e74..b9c11c2b2885 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1414,10 +1414,12 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
pm_runtime_put_autosuspend(connector->dev->dev);
}
 
-	drm_dp_set_subconnector_property(&amdgpu_connector->base,
-					 ret,
-					 amdgpu_dig_connector->dpcd,
-					 amdgpu_dig_connector->downstream_ports);
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+		drm_dp_set_subconnector_property(&amdgpu_connector->base,
+						 ret,
+						 amdgpu_dig_connector->dpcd,
+						 amdgpu_dig_connector->downstream_ports);
return ret;
 }
 
-- 
2.25.4



[PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests

2020-12-18 Thread Nirmoy Das
Use amdgpu_sa_bo instead of amdgpu_bo.

v2:
* do not initialize bo to get hint from compiler for -Wuninitialized
* pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
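
The resulting allocation pattern, in brief (error handling trimmed):

    struct amdgpu_sa_bo *bo;
    uint64_t addr;
    uint32_t *msg;
    int r;

    /* suballocate from the direct IB pool instead of creating a new BO */
    r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
                         &bo, 1024, PAGE_SIZE);
    if (r)
            return r;

    msg = amdgpu_sa_bo_cpu_addr(bo);   /* CPU address, for writing the msg */
    addr = amdgpu_sa_bo_gpu_addr(bo);  /* GPU address, for the IB */

    /* on success pass the job fence f; on error paths pass NULL */
    amdgpu_sa_bo_free(adev, &bo, f);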

Signed-off-by: Nirmoy Das 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++---
 3 files changed, 45 insertions(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 8b989670ed66..13450a3df044 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return 0;
 }

-static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_sa_bo *bo,
   bool direct, struct dma_fence **fence)
 {
struct amdgpu_device *adev = ring->adev;
@@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
unsigned offset_idx = 0;
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

-   amdgpu_bo_kunmap(bo);
-   amdgpu_bo_unpin(bo);
-
-   if (!ring->adev->uvd.address_64_bit) {
-   struct ttm_operation_ctx ctx = { true, false };
-
-   amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
-   amdgpu_uvd_force_into_uvd_segment(bo);
-   r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-   if (r)
-   goto err;
-   }
-
r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
 AMDGPU_IB_POOL_DELAYED, &job);
if (r)
@@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);

ib = &job->ibs[0];
-   addr = amdgpu_bo_gpu_offset(bo);
+   addr = amdgpu_sa_bo_gpu_addr(bo);
ib->ptr[0] = data[0];
ib->ptr[1] = addr;
ib->ptr[2] = data[1];
@@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;

if (direct) {
-   r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
-   true, false,
-   msecs_to_jiffies(10));
-   if (r == 0)
-   r = -ETIMEDOUT;
-   if (r < 0)
-   goto err_free;
-
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
} else {
-   r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
-AMDGPU_SYNC_ALWAYS,
-AMDGPU_FENCE_OWNER_UNDEFINED);
-   if (r)
-   goto err_free;
-
r = amdgpu_job_submit(job, &adev->uvd.entity,
  AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err_free;
}

-   amdgpu_bo_fence(bo, f, false);
-   amdgpu_bo_unreserve(bo);
-   amdgpu_bo_unref(&bo);
+   amdgpu_sa_bo_free(adev, &bo, f);

if (fence)
*fence = dma_fence_get(f);
@@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
amdgpu_job_free(job);

 err:
-   amdgpu_bo_unreserve(bo);
-   amdgpu_bo_unref(&bo);
+   amdgpu_sa_bo_free(adev, &bo, NULL);
return r;
 }

@@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
  struct dma_fence **fence)
 {
struct amdgpu_device *adev = ring->adev;
-   struct amdgpu_bo *bo = NULL;
+   struct amdgpu_sa_bo *bo;
uint32_t *msg;
int r, i;

-   r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, (void **)&msg);
+   r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+&bo, 1024, PAGE_SIZE);
+
if (r)
return r;

+   msg = amdgpu_sa_bo_cpu_addr(bo);
/* stitch together an UVD create msg */
msg[0] = cpu_to_le32(0x0de4);
msg[1] = cpu_to_le32(0x);
@@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
   bool direct, struct dma_fence **fence)
 {
struct amdgpu_device *adev = ring->adev;
-   struct amdgpu_bo *bo = NULL;
+   struct amdgpu_sa_bo *bo;
uint32_t *msg;
 

Re: [PATCH v3 05/12] drm/ttm: Expose ttm_tt_unpopulate for driver use

2020-12-18 Thread Daniel Vetter
On Thu, Dec 17, 2020 at 04:06:38PM -0500, Andrey Grodzovsky wrote:
> 
> On 12/17/20 3:48 PM, Daniel Vetter wrote:
> > On Thu, Dec 17, 2020 at 9:38 PM Andrey Grodzovsky
> >  wrote:
> > > 
> > > On 12/17/20 3:10 PM, Christian König wrote:
> > > > [SNIP]
> > > > > > > By eliminating such users, and replacing them with local maps 
> > > > > > > which
> > > > > > > > are strictly bound in how long they can exist (and hence we can
> > > > > > > > serialize against them finishing in our hotunplug code).
> > > > > > > Not sure I see how serializing against BO map/unmap helps - our 
> > > > > > > problem as
> > > > > > > you described is that once
> > > > > > > device is extracted and then something else quickly takes it's 
> > > > > > > place in the
> > > > > > > PCI topology
> > > > > > > and gets assigned same physical IO ranges, then our driver will 
> > > > > > > start
> > > > > > > accessing this
> > > > > > > new device because our 'zombie' BOs are still pointing to those 
> > > > > > > ranges.
> > > > > > Until your driver's remove callback is finished the ranges stay 
> > > > > > reserved.
> > > > > 
> > > > > The ranges stay reserved until unmapped which happens in bo->destroy
> > > > I'm not sure of that. Why do you think that?
> > > 
> > > Because of this sequence
> > > ttm_bo_release->destroy->amdgpu_bo_destroy->amdgpu_bo_kunmap->...->iounmap
> > > Is there another place I am missing ?
> > iounmap is just the mapping, it doesn't reserve anything in the resource 
> > tree.
> > 
> > And I don't think we should keep resources reserved past the pci
> > remove callback, because that would upset the pci subsystem trying to
> > assign resources to a newly hotplugged pci device.
> 
> 
> I assumed we are talking about VA ranges still mapped in the page table. I
> just assumed that part of ioremap is also reservation of the mapped
> physical ranges. In fact, if we can explicitly reserve those ranges (as you
> mention here) then together with postponing system memory page
> freeing/releasing back to the page pool until after the BO is unmapped from
> the kernel address space, I believe this could solve the issue of quick HW
> reinsertion and make all the drm_dev_enter/exit guarding obsolete.

We can't reserve these ranges, that's what I tried to explain:
- kernel/resource.c isn't very consistently used
- the pci core will get pissed if there's suddenly a range in the middle
  of a bridge that it can't use
- nesting is allowed for resources, so this doesn't actually guarantee
  much

I just wanted to point out that ioremap doesn't do any reserving, so not
enough by far.

We really have to stop using any mmio ranges before the pci remove
callback is finished.
-Daniel
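
For reference, a minimal sketch of such a guard around an MMIO accessor
(names are illustrative; assumes the remove path calls drm_dev_unplug()):

    static u32 amdgpu_mmio_rreg_guarded(struct amdgpu_device *adev, u32 reg)
    {
            u32 val = 0;
            int idx;

            /* drm_dev_enter() fails once drm_dev_unplug() has run */
            if (drm_dev_enter(adev_to_drm(adev), &idx)) {
                    val = readl(adev->rmmio + reg * 4);
                    drm_dev_exit(idx);
            }
            return val;
    }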

> 
> Andrey
> 
> 
> > Also from a quick check amdgpu does not reserve the pci bars it's
> > using. Somehow most drm drivers don't do that, not exactly sure why,
> > maybe auto-enumeration of resources just works too good and we don't
> > need the safety net of kernel/resource.c anymore.
> > -Daniel
> > 
> > 
> > > > > which for most internally allocated buffers is during sw_fini when 
> > > > > last drm_put
> > > > > is called.
> > > > > 
> > > > > 
> > > > > > If that's not the case, then hotunplug would be fundamentally 
> > > > > > impossible
> > > > > > ot handle correctly.
> > > > > > 
> > > > > > Of course all the mmio actions will time out, so it might take some 
> > > > > > time
> > > > > > to get through it all.
> > > > > 
> > > > > I found that PCI code provides pci_device_is_present function
> > > > > we can use to avoid timeouts - it reads device vendor and checks if 
> > > > > all 1s is
> > > > > returned
> > > > > or not. We can call it from within register accessors before trying 
> > > > > read/write
> > > > That's way too much overhead! We need to keep that much lower or it
> > > > will result in quite a performance drop.
> > > > 
> > > > I suggest to rather think about adding drm_dev_enter/exit guards.
> > > 
> > > Sure, this one is just a bit upstream to the disconnect event. Eventually 
> > > none
> > > of them is watertight.
> > > 
> > > Andrey
> > > 
> > > 
> > > > Christian.
> > > > 
> > > > > > > Another point regarding serializing - problem  is that some of 
> > > > > > > those BOs are
> > > > > > > very long lived, take for example the HW command
> > > > > > > ring buffer Christian mentioned before -
> > > > > > > (amdgpu_ring_init->amdgpu_bo_create_kernel), it's life span
> > > > > > > is basically for the entire time the device exists, it's 
> > > > > > > destroyed only in
> > > > > > > the SW fini stage (when last drm_dev
> > > > > > > reference is dropped) and so should I grab it's dma_resv_lock from
> > > > > > > amdgpu_pci_remove code and wait
> > > > > > > for it to be unmapped before proceeding with the PCI remove code 
> > > > > > > ? This can
> > > > > > > take unbound time and that why I don't understand
> > > > > > > how serializing will help.
> > > > > > Uh you need to untangle that. After hw cleanup 

Re: [PATCH 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests

2020-12-18 Thread Christian König

Am 18.12.20 um 14:55 schrieb Nirmoy Das:

Use amdgpu_sa_bo instead of amdgpu_bo.

Signed-off-by: Nirmoy Das 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++--
  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++---
  3 files changed, 45 insertions(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 8b989670ed66..3c723e25dd5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return 0;
  }
  
-static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_sa_bo *bo,
   bool direct, struct dma_fence **fence)
  {
struct amdgpu_device *adev = ring->adev;
@@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
unsigned offset_idx = 0;
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
  
-	amdgpu_bo_kunmap(bo);

-   amdgpu_bo_unpin(bo);
-
-   if (!ring->adev->uvd.address_64_bit) {
-   struct ttm_operation_ctx ctx = { true, false };
-
-   amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
-   amdgpu_uvd_force_into_uvd_segment(bo);
-   r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-   if (r)
-   goto err;
-   }
-
r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
 AMDGPU_IB_POOL_DELAYED, &job);
if (r)
@@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
  
  	ib = &job->ibs[0];

-   addr = amdgpu_bo_gpu_offset(bo);
+   addr = amdgpu_sa_bo_gpu_addr(bo);
ib->ptr[0] = data[0];
ib->ptr[1] = addr;
ib->ptr[2] = data[1];
@@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
  
  	if (direct) {

-   r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
-   true, false,
-   msecs_to_jiffies(10));
-   if (r == 0)
-   r = -ETIMEDOUT;
-   if (r < 0)
-   goto err_free;
-
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
} else {
-   r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
-AMDGPU_SYNC_ALWAYS,
-AMDGPU_FENCE_OWNER_UNDEFINED);
-   if (r)
-   goto err_free;
-
r = amdgpu_job_submit(job, &adev->uvd.entity,
  AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err_free;
}
  
-	amdgpu_bo_fence(bo, f, false);

-   amdgpu_bo_unreserve(bo);
-   amdgpu_bo_unref(&bo);
+   amdgpu_sa_bo_free(adev, &bo, f);
  
  	if (fence)

*fence = dma_fence_get(f);
@@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
amdgpu_job_free(job);
  
  err:

-   amdgpu_bo_unreserve(bo);
-   amdgpu_bo_unref(&bo);
+   amdgpu_sa_bo_free(adev, &bo, f);


The fence is undefined here and should probably be replaced with NULL. 
Same for other places.
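
I.e. roughly this on the error path (which is what v2 ends up doing):

    err:
            /* no fence exists on this path, don't pass a stale pointer */
            amdgpu_sa_bo_free(adev, &bo, NULL);
            return r;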



return r;
  }
  
@@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,

  struct dma_fence **fence)
  {
struct amdgpu_device *adev = ring->adev;
-   struct amdgpu_bo *bo = NULL;
+   struct amdgpu_sa_bo *bo = NULL;
uint32_t *msg;
int r, i;
  
-	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,

- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, (void **)&msg);
+   r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+&bo, 1024, PAGE_SIZE);
+
if (r)
return r;
  
+	msg = amdgpu_sa_bo_cpu_addr(bo);

/* stitch together an UVD create msg */
msg[0] = cpu_to_le32(0x0de4);
msg[1] = cpu_to_le32(0x);
@@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
   bool direct, struct dma_fence **fence)
  {
struct amdgpu_device *adev = ring->adev;
-   struct amdgpu_bo *bo = NULL;
+   struct amdgpu_sa_bo *bo = NULL;


Plea

[PATCH 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests

2020-12-18 Thread Nirmoy Das
Use amdgpu_sa_bo instead of amdgpu_bo.

Signed-off-by: Nirmoy Das 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++---
 3 files changed, 45 insertions(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 8b989670ed66..3c723e25dd5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return 0;
 }
 
-static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_sa_bo *bo,
   bool direct, struct dma_fence **fence)
 {
struct amdgpu_device *adev = ring->adev;
@@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
unsigned offset_idx = 0;
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
 
-   amdgpu_bo_kunmap(bo);
-   amdgpu_bo_unpin(bo);
-
-   if (!ring->adev->uvd.address_64_bit) {
-   struct ttm_operation_ctx ctx = { true, false };
-
-   amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
-   amdgpu_uvd_force_into_uvd_segment(bo);
-   r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-   if (r)
-   goto err;
-   }
-
r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
 AMDGPU_IB_POOL_DELAYED, &job);
if (r)
@@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
 
ib = &job->ibs[0];
-   addr = amdgpu_bo_gpu_offset(bo);
+   addr = amdgpu_sa_bo_gpu_addr(bo);
ib->ptr[0] = data[0];
ib->ptr[1] = addr;
ib->ptr[2] = data[1];
@@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
 
if (direct) {
-   r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
-   true, false,
-   msecs_to_jiffies(10));
-   if (r == 0)
-   r = -ETIMEDOUT;
-   if (r < 0)
-   goto err_free;
-
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
} else {
-   r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
-AMDGPU_SYNC_ALWAYS,
-AMDGPU_FENCE_OWNER_UNDEFINED);
-   if (r)
-   goto err_free;
-
r = amdgpu_job_submit(job, &adev->uvd.entity,
  AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err_free;
}
 
-   amdgpu_bo_fence(bo, f, false);
-   amdgpu_bo_unreserve(bo);
-   amdgpu_bo_unref(&bo);
+   amdgpu_sa_bo_free(adev, &bo, f);
 
if (fence)
*fence = dma_fence_get(f);
@@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
amdgpu_job_free(job);
 
 err:
-   amdgpu_bo_unreserve(bo);
-   amdgpu_bo_unref(&bo);
+   amdgpu_sa_bo_free(adev, &bo, f);
return r;
 }
 
@@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
  struct dma_fence **fence)
 {
struct amdgpu_device *adev = ring->adev;
-   struct amdgpu_bo *bo = NULL;
+   struct amdgpu_sa_bo *bo = NULL;
uint32_t *msg;
int r, i;
 
-   r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, (void **)&msg);
+   r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+&bo, 1024, PAGE_SIZE);
+
if (r)
return r;
 
+   msg = amdgpu_sa_bo_cpu_addr(bo);
/* stitch together an UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
@@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
   bool direct, struct dma_fence **fence)
 {
struct amdgpu_device *adev = ring->adev;
-   struct amdgpu_bo *bo = NULL;
+   struct amdgpu_sa_bo *bo = NULL;
uint32_t *msg;
int r, i;
 
-   r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-  
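A note on the synchronization that disappears from amdgpu_uvd_send_msg() above: the direct path no longer waits on the BO's reservation object, and the scheduled path no longer calls amdgpu_sync_resv(), presumably safe because the suballocator will not recycle the range until the fence handed to amdgpu_sa_bo_free() signals. A minimal sketch of the resulting completion path, condensed from the hunk above:

	/* 'f' is the submission fence; fencing the suballocation
	 * replaces the per-BO reservation handling. */
	amdgpu_sa_bo_free(adev, &bo, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;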

Re: [PATCH v2 1/1] drm/amdgpu: clean up bo in vce and vcn test

2020-12-18 Thread Nirmoy

Hi Christian,

On 12/17/20 3:25 PM, Christian König wrote:

On 12/8/20 8:39 PM, Nirmoy wrote:


On 12/8/20 8:04 PM, Christian König wrote:

On 12/8/20 7:59 PM, Nirmoy Das wrote:

BO created with amdgpu_bo_create_reserved() wasn't cleaned up
properly before, which causes:

[   21.056218] WARNING: CPU: 0 PID: 7 at drivers/gpu/drm/ttm/ttm_bo.c:518 ttm_bo_release+0x2bf/0x310 [ttm]


[   21.056430] Call Trace:
[   21.056525]  amdgpu_bo_unref+0x1a/0x30 [amdgpu]
[   21.056635]  amdgpu_vcn_dec_send_msg+0x1b2/0x270 [amdgpu]
[   21.056740]  amdgpu_vcn_dec_get_create_msg.constprop.0+0xd8/0x100 [amdgpu]

[   21.056843]  amdgpu_vcn_dec_ring_test_ib+0x27/0x180 [amdgpu]
[   21.056936]  amdgpu_ib_ring_tests+0xf1/0x150 [amdgpu]
[   21.057024]  amdgpu_device_delayed_init_work_handler+0x11/0x30 [amdgpu]

[   21.057030]  process_one_work+0x1df/0x370
[   21.057033]  worker_thread+0x46/0x340
[   21.057034]  ? process_one_work+0x370/0x370
[   21.057037]  kthread+0x11b/0x140
[   21.057039]  ? __kthread_bind_mask+0x60/0x60
[   21.057043]  ret_from_fork+0x22/0x30

Signed-off-by: Nirmoy Das 


At some point we should probably switch to using an SA BO and stop 
this mess here.

I will look into this.


Please leave me a note when you can do this since we stumbled into 
problems with that.

I should have a patch ready by today.


Regards,

Nirmoy

Thanks,
Christian.

But for now the patch is Reviewed-by: Christian König 

Thanks,

Nirmoy

---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 9 ++---
  2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

index ecaa2d7483b2..78a4dd9bf11f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1151,6 +1151,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)

  error:
  dma_fence_put(fence);
  amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
+    amdgpu_bo_free_kernel(&bo, NULL, NULL);
  return r;
  }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

index 7e19a6656715..921b81054c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -465,6 +465,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,

  struct amdgpu_job *job;
  struct amdgpu_ib *ib;
  uint64_t addr;
+    void *msg = NULL;
  int i, r;
    r = amdgpu_job_alloc_with_ib(adev, 64,
@@ -474,6 +475,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,

    ib = &job->ibs[0];
  addr = amdgpu_bo_gpu_offset(bo);
+    msg = amdgpu_bo_kptr(bo);
  ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
  ib->ptr[1] = addr;
  ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -492,7 +494,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,

    amdgpu_bo_fence(bo, f, false);
  amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
+    amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
    if (fence)
  *fence = dma_fence_get(f);
@@ -505,7 +507,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,

    err:
  amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
+    amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
  return r;
  }
@@ -761,6 +763,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)

  error:
  dma_fence_put(fence);
  amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
+    amdgpu_bo_free_kernel(&bo, NULL, NULL);
+
  return r;
  }
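For context on the warning fixed here: amdgpu_bo_create_reserved() hands back a pinned, kmapped and reserved BO, and dropping the last reference with a bare amdgpu_bo_unref() while the pin count is still raised trips the WARN_ON in ttm_bo_release(). A minimal sketch of the corrected pairing, assuming the usual helper signatures (illustrative only, not the exact selftest code):

	struct amdgpu_bo *bo = NULL;
	u64 gpu_addr;
	void *cpu_addr;
	int r;

	/* Returns a pinned, kmapped and reserved BO on success. */
	r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, &gpu_addr, &cpu_addr);
	if (r)
		return r;

	/* ... write the message via cpu_addr, submit using gpu_addr ... */

	amdgpu_bo_unreserve(bo);
	/* Unmaps and unpins before dropping the reference; a bare
	 * amdgpu_bo_unref(&bo) here would free a still-pinned BO and
	 * hit the ttm_bo_release() warning quoted above. */
	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);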


