Re: [PATCH] drm/amd/display: Fix mutex lock in dcn10

2022-09-30 Thread Hamza Mahfooz

Applied. Thanks!

On 2022-09-25 17:53, Daniel Gomez wrote:

Removal of DC_FP_* wrappers from dml (9696679bf7ac) provokes a mutex
lock [2] on the amdgpu driver. Re-arrange the dcn10 code to avoid
locking the mutex by placing the DC_FP_* wrappers around the proper
functions.

This fixes the following WARN/stacktrace:

BUG: sleeping function called from invalid context at kernel/locking/mutex.c:283
in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 227, name: systemd-udevd
preempt_count: 1, expected: 0
CPU: 4 PID: 227 Comm: systemd-udevd Not tainted 6.0.0-rc6-qtec-standard #2
Hardware name: Qtechnology A/S QT5222/QT5221, BIOS v1.0.1 06/07/2021
Call Trace:
  
  dump_stack_lvl+0x33/0x42
  __might_resched.cold.172+0xa5/0xb3
  mutex_lock+0x1a/0x40
  amdgpu_dpm_get_clock_by_type_with_voltage+0x38/0x70 [amdgpu]
  dm_pp_get_clock_levels_by_type_with_voltage+0x64/0xa0 [amdgpu]
  dcn_bw_update_from_pplib+0x70/0x340 [amdgpu]
  dcn10_create_resource_pool+0x8c8/0xd20 [amdgpu]
  ? __kmalloc+0x1c7/0x4a0
  dc_create_resource_pool+0xe7/0x190 [amdgpu]
  dc_create+0x212/0x5d0 [amdgpu]
  amdgpu_dm_init+0x246/0x370 [amdgpu]
  ? schedule_hrtimeout_range_clock+0x93/0x120
  ? phm_wait_for_register_unequal.part.1+0x4a/0x80 [amdgpu]
  dm_hw_init+0xe/0x20 [amdgpu]
  amdgpu_device_init.cold.56+0x1324/0x1653 [amdgpu]
  ? pci_bus_read_config_word+0x43/0x80
  amdgpu_driver_load_kms+0x15/0x120 [amdgpu]
  amdgpu_pci_probe+0x116/0x320 [amdgpu]
  pci_device_probe+0x97/0x110
  really_probe+0xdd/0x340
  __driver_probe_device+0x80/0x170
  driver_probe_device+0x1f/0x90
  __driver_attach+0xdc/0x180
  ? __device_attach_driver+0x100/0x100
  ? __device_attach_driver+0x100/0x100
  bus_for_each_dev+0x74/0xc0
  bus_add_driver+0x19e/0x210
  ? kset_find_obj+0x30/0xa0
  ? 0xa0a5b000
  driver_register+0x6b/0xc0
  ? 0xa0a5b000
  do_one_initcall+0x4a/0x1f0
  ? __vunmap+0x28e/0x2f0
  ? __cond_resched+0x15/0x30
  ? kmem_cache_alloc_trace+0x3d/0x440
  do_init_module+0x4a/0x1e0
  load_module+0x1cba/0x1e10
  ? __do_sys_finit_module+0xb7/0x120
  __do_sys_finit_module+0xb7/0x120
  do_syscall_64+0x3c/0x80
  entry_SYSCALL_64_after_hwframe+0x63/0xcd
RIP: 0033:0x7ff2b5f5422d
Code: 5d c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48
89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
3d 01 f0 ff ff 73 01 c3 48 8b 0d c3 ab 0e 00 f7 d8 64 89 01 48
RSP: 002b:7ffc44ab28e8 EFLAGS: 0246 ORIG_RAX: 0139
RAX: ffda RBX: 555c566a9240 RCX: 7ff2b5f5422d
RDX:  RSI: 7ff2b60bb353 RDI: 0019
RBP: 7ff2b60bb353 R08:  R09: 555c566a9240
R10: 0019 R11: 0246 R12: 
R13: 0002 R14:  R15: 


Fixes: 9696679bf7ac ("drm/amd/display: remove DC_FP_* wrapper from
dml folder")
Signed-off-by: Daniel Gomez 
---
  .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  12 +-
  .../drm/amd/display/dc/dcn10/dcn10_resource.c |  66 +-
  .../drm/amd/display/dc/dml/calcs/dcn_calcs.c  | 118 --
  .../gpu/drm/amd/display/dc/inc/dcn_calcs.h|  19 ++-
  4 files changed, 138 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 5b5d952b2b8c..cb1e06d62841 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2994,6 +2994,7 @@ void dcn10_prepare_bandwidth(
  {
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+   int min_fclk_khz, min_dcfclk_khz, socclk_khz;
  
  	if (dc->debug.sanity_checks)

hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3016,8 +3017,11 @@ void dcn10_prepare_bandwidth(
  
  	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {

DC_FP_START();
-   dcn_bw_notify_pplib_of_wm_ranges(dc);
+   dcn_get_soc_clks(
+   dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+   dcn_bw_notify_pplib_of_wm_ranges(
+   dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
  
  	if (dc->debug.sanity_checks)

@@ -3030,6 +3034,7 @@ void dcn10_optimize_bandwidth(
  {
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+   int min_fclk_khz, min_dcfclk_khz, socclk_khz;
  
  	if (dc->debug.sanity_checks)

hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3053,8 +3058,11 @@ void dcn10_optimize_bandwidth(
  
  	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {

DC_FP_START();
-   dcn_bw_notify_pplib_of_wm_ranges(dc);
+   dcn_get_soc_clks(
+   dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+   dcn_bw_notify_pplib_of_wm_ranges(
+ 

Re: [PATCH] drm/amd/display: Fix mutex lock in dcn10

2022-09-26 Thread Christian König

Am 25.09.22 um 23:53 schrieb Daniel Gomez:

Removal of DC_FP_* wrappers from dml (9696679bf7ac) provokes a mutex
lock [2] on the amdgpu driver. Re-arrange the dcn10 code to avoid
locking the mutex by placing the DC_FP_* wrappers around the proper
functions.


Off hand that looks correct to me now, but our DC team needs to take a 
closer look.


Regards,
Christian.



This fixes the following WARN/stacktrace:

BUG: sleeping function called from invalid context at kernel/locking/mutex.c:283
in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 227, name: systemd-udevd
preempt_count: 1, expected: 0
CPU: 4 PID: 227 Comm: systemd-udevd Not tainted 6.0.0-rc6-qtec-standard #2
Hardware name: Qtechnology A/S QT5222/QT5221, BIOS v1.0.1 06/07/2021
Call Trace:
  
  dump_stack_lvl+0x33/0x42
  __might_resched.cold.172+0xa5/0xb3
  mutex_lock+0x1a/0x40
  amdgpu_dpm_get_clock_by_type_with_voltage+0x38/0x70 [amdgpu]
  dm_pp_get_clock_levels_by_type_with_voltage+0x64/0xa0 [amdgpu]
  dcn_bw_update_from_pplib+0x70/0x340 [amdgpu]
  dcn10_create_resource_pool+0x8c8/0xd20 [amdgpu]
  ? __kmalloc+0x1c7/0x4a0
  dc_create_resource_pool+0xe7/0x190 [amdgpu]
  dc_create+0x212/0x5d0 [amdgpu]
  amdgpu_dm_init+0x246/0x370 [amdgpu]
  ? schedule_hrtimeout_range_clock+0x93/0x120
  ? phm_wait_for_register_unequal.part.1+0x4a/0x80 [amdgpu]
  dm_hw_init+0xe/0x20 [amdgpu]
  amdgpu_device_init.cold.56+0x1324/0x1653 [amdgpu]
  ? pci_bus_read_config_word+0x43/0x80
  amdgpu_driver_load_kms+0x15/0x120 [amdgpu]
  amdgpu_pci_probe+0x116/0x320 [amdgpu]
  pci_device_probe+0x97/0x110
  really_probe+0xdd/0x340
  __driver_probe_device+0x80/0x170
  driver_probe_device+0x1f/0x90
  __driver_attach+0xdc/0x180
  ? __device_attach_driver+0x100/0x100
  ? __device_attach_driver+0x100/0x100
  bus_for_each_dev+0x74/0xc0
  bus_add_driver+0x19e/0x210
  ? kset_find_obj+0x30/0xa0
  ? 0xa0a5b000
  driver_register+0x6b/0xc0
  ? 0xa0a5b000
  do_one_initcall+0x4a/0x1f0
  ? __vunmap+0x28e/0x2f0
  ? __cond_resched+0x15/0x30
  ? kmem_cache_alloc_trace+0x3d/0x440
  do_init_module+0x4a/0x1e0
  load_module+0x1cba/0x1e10
  ? __do_sys_finit_module+0xb7/0x120
  __do_sys_finit_module+0xb7/0x120
  do_syscall_64+0x3c/0x80
  entry_SYSCALL_64_after_hwframe+0x63/0xcd
RIP: 0033:0x7ff2b5f5422d
Code: 5d c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48
89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
3d 01 f0 ff ff 73 01 c3 48 8b 0d c3 ab 0e 00 f7 d8 64 89 01 48
RSP: 002b:7ffc44ab28e8 EFLAGS: 0246 ORIG_RAX: 0139
RAX: ffda RBX: 555c566a9240 RCX: 7ff2b5f5422d
RDX:  RSI: 7ff2b60bb353 RDI: 0019
RBP: 7ff2b60bb353 R08:  R09: 555c566a9240
R10: 0019 R11: 0246 R12: 
R13: 0002 R14:  R15: 


Fixes: 9696679bf7ac ("drm/amd/display: remove DC_FP_* wrapper from
dml folder")
Signed-off-by: Daniel Gomez 
---
  .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  12 +-
  .../drm/amd/display/dc/dcn10/dcn10_resource.c |  66 +-
  .../drm/amd/display/dc/dml/calcs/dcn_calcs.c  | 118 --
  .../gpu/drm/amd/display/dc/inc/dcn_calcs.h|  19 ++-
  4 files changed, 138 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 5b5d952b2b8c..cb1e06d62841 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2994,6 +2994,7 @@ void dcn10_prepare_bandwidth(
  {
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+   int min_fclk_khz, min_dcfclk_khz, socclk_khz;
  
  	if (dc->debug.sanity_checks)

hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3016,8 +3017,11 @@ void dcn10_prepare_bandwidth(
  
  	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {

DC_FP_START();
-   dcn_bw_notify_pplib_of_wm_ranges(dc);
+   dcn_get_soc_clks(
+   dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+   dcn_bw_notify_pplib_of_wm_ranges(
+   dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
  
  	if (dc->debug.sanity_checks)

@@ -3030,6 +3034,7 @@ void dcn10_optimize_bandwidth(
  {
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+   int min_fclk_khz, min_dcfclk_khz, socclk_khz;
  
  	if (dc->debug.sanity_checks)

hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3053,8 +3058,11 @@ void dcn10_optimize_bandwidth(
  
  	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {

DC_FP_START();
-   dcn_bw_notify_pplib_of_wm_ranges(dc);
+   dcn_get_soc_clks(
+   dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
   

[PATCH] drm/amd/display: Fix mutex lock in dcn10

2022-09-25 Thread Daniel Gomez
Removal of DC_FP_* wrappers from dml (9696679bf7ac) provokes a mutex
lock [2] on the amdgpu driver. Re-arrange the dcn10 code to avoid
locking the mutex by placing the DC_FP_* wrappers around the proper
functions.

This fixes the following WARN/stacktrace:

BUG: sleeping function called from invalid context at kernel/locking/mutex.c:283
in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 227, name: systemd-udevd
preempt_count: 1, expected: 0
CPU: 4 PID: 227 Comm: systemd-udevd Not tainted 6.0.0-rc6-qtec-standard #2
Hardware name: Qtechnology A/S QT5222/QT5221, BIOS v1.0.1 06/07/2021
Call Trace:
 
 dump_stack_lvl+0x33/0x42
 __might_resched.cold.172+0xa5/0xb3
 mutex_lock+0x1a/0x40
 amdgpu_dpm_get_clock_by_type_with_voltage+0x38/0x70 [amdgpu]
 dm_pp_get_clock_levels_by_type_with_voltage+0x64/0xa0 [amdgpu]
 dcn_bw_update_from_pplib+0x70/0x340 [amdgpu]
 dcn10_create_resource_pool+0x8c8/0xd20 [amdgpu]
 ? __kmalloc+0x1c7/0x4a0
 dc_create_resource_pool+0xe7/0x190 [amdgpu]
 dc_create+0x212/0x5d0 [amdgpu]
 amdgpu_dm_init+0x246/0x370 [amdgpu]
 ? schedule_hrtimeout_range_clock+0x93/0x120
 ? phm_wait_for_register_unequal.part.1+0x4a/0x80 [amdgpu]
 dm_hw_init+0xe/0x20 [amdgpu]
 amdgpu_device_init.cold.56+0x1324/0x1653 [amdgpu]
 ? pci_bus_read_config_word+0x43/0x80
 amdgpu_driver_load_kms+0x15/0x120 [amdgpu]
 amdgpu_pci_probe+0x116/0x320 [amdgpu]
 pci_device_probe+0x97/0x110
 really_probe+0xdd/0x340
 __driver_probe_device+0x80/0x170
 driver_probe_device+0x1f/0x90
 __driver_attach+0xdc/0x180
 ? __device_attach_driver+0x100/0x100
 ? __device_attach_driver+0x100/0x100
 bus_for_each_dev+0x74/0xc0
 bus_add_driver+0x19e/0x210
 ? kset_find_obj+0x30/0xa0
 ? 0xa0a5b000
 driver_register+0x6b/0xc0
 ? 0xa0a5b000
 do_one_initcall+0x4a/0x1f0
 ? __vunmap+0x28e/0x2f0
 ? __cond_resched+0x15/0x30
 ? kmem_cache_alloc_trace+0x3d/0x440
 do_init_module+0x4a/0x1e0
 load_module+0x1cba/0x1e10
 ? __do_sys_finit_module+0xb7/0x120
 __do_sys_finit_module+0xb7/0x120
 do_syscall_64+0x3c/0x80
 entry_SYSCALL_64_after_hwframe+0x63/0xcd
RIP: 0033:0x7ff2b5f5422d
Code: 5d c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48
89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
3d 01 f0 ff ff 73 01 c3 48 8b 0d c3 ab 0e 00 f7 d8 64 89 01 48
RSP: 002b:7ffc44ab28e8 EFLAGS: 0246 ORIG_RAX: 0139
RAX: ffda RBX: 555c566a9240 RCX: 7ff2b5f5422d
RDX:  RSI: 7ff2b60bb353 RDI: 0019
RBP: 7ff2b60bb353 R08:  R09: 555c566a9240
R10: 0019 R11: 0246 R12: 
R13: 0002 R14:  R15: 


Fixes: 9696679bf7ac ("drm/amd/display: remove DC_FP_* wrapper from
dml folder")
Signed-off-by: Daniel Gomez 
---
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  12 +-
 .../drm/amd/display/dc/dcn10/dcn10_resource.c |  66 +-
 .../drm/amd/display/dc/dml/calcs/dcn_calcs.c  | 118 --
 .../gpu/drm/amd/display/dc/inc/dcn_calcs.h|  19 ++-
 4 files changed, 138 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 5b5d952b2b8c..cb1e06d62841 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2994,6 +2994,7 @@ void dcn10_prepare_bandwidth(
 {
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+   int min_fclk_khz, min_dcfclk_khz, socclk_khz;
 
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3016,8 +3017,11 @@ void dcn10_prepare_bandwidth(
 
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
-   dcn_bw_notify_pplib_of_wm_ranges(dc);
+   dcn_get_soc_clks(
+   dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+   dcn_bw_notify_pplib_of_wm_ranges(
+   dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
 
if (dc->debug.sanity_checks)
@@ -3030,6 +3034,7 @@ void dcn10_optimize_bandwidth(
 {
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+   int min_fclk_khz, min_dcfclk_khz, socclk_khz;
 
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3053,8 +3058,11 @@ void dcn10_optimize_bandwidth(
 
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
-   dcn_bw_notify_pplib_of_wm_ranges(dc);
+   dcn_get_soc_clks(
+   dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+   dcn_bw_notify_pplib_of_wm_ranges(
+   dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
 
if