[PATCH 1/2] drm/edid: Add aspect ratios to HDMI 4K modes

2019-11-11 Thread Wayne Lin
[Why]
HDMI 2.0 adds aspect ratio attribute to distinguish different
4k modes. According to Appendix E of HDMI 2.0 spec, source should
use VSIF to indicate video mode only when the mode is one defined
in HDMI 1.4b 4K modes. Otherwise, use AVI infoframes to convey VIC.

The current code doesn't take the aspect ratio into consideration while
constructing the AVI infoframe. Modify it to do so.

[How]
Inherit Ville Syrjälä's work
"drm/edid: Prep for HDMI VIC aspect ratio" at
https://patchwork.kernel.org/patch/11174639/

Add picture_aspect_ratio attributes to edid_4k_modes[] and
construct VIC and HDMI_VIC by taking aspect ratio into
consideration.

Signed-off-by: Wayne Lin 
---
 drivers/gpu/drm/drm_edid.c | 45 +-
 1 file changed, 35 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 77a39fc76045..fcd7ae29049d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1288,25 +1288,25 @@ static const struct drm_display_mode edid_4k_modes[] = {
   3840, 4016, 4104, 4400, 0,
   2160, 2168, 2178, 2250, 0,
   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, },
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 2 - 3840x2160@25Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
   3840, 4896, 4984, 5280, 0,
   2160, 2168, 2178, 2250, 0,
   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, },
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 3 - 3840x2160@24Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
   3840, 5116, 5204, 5500, 0,
   2160, 2168, 2178, 2250, 0,
   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, },
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 4096x2160@24Hz (SMPTE) */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
   4096, 5116, 5204, 5500, 0,
   2160, 2168, 2178, 2250, 0,
   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, },
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
 };
 
 /*** DDC fetch and block validation ***/
@@ -3110,6 +3110,11 @@ static enum hdmi_picture_aspect 
drm_get_cea_aspect_ratio(const u8 video_code)
return edid_cea_modes[video_code].picture_aspect_ratio;
 }
 
+static enum hdmi_picture_aspect drm_get_hdmi_aspect_ratio(const u8 video_code)
+{
+   return edid_4k_modes[video_code].picture_aspect_ratio;
+}
+
 /*
  * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
  * specific block).
@@ -3136,6 +3141,9 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const 
struct drm_display_mode *to_
if (!to_match->clock)
return 0;
 
+   if (to_match->picture_aspect_ratio)
+   match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
@@ -3171,6 +3179,9 @@ static u8 drm_match_hdmi_mode(const struct 
drm_display_mode *to_match)
if (!to_match->clock)
return 0;
 
+   if (to_match->picture_aspect_ratio)
+   match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
@@ -5118,6 +5129,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct 
hdmi_avi_infoframe *frame,
 const struct drm_display_mode *mode)
 {
enum hdmi_picture_aspect picture_aspect;
+   u8 vic, hdmi_vic;
int err;
 
if (!frame || !mode)
@@ -5130,7 +5142,8 @@ drm_hdmi_avi_infoframe_from_display_mode(struct 
hdmi_avi_infoframe *frame,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
 
-   frame->video_code = drm_mode_cea_vic(connector, mode);
+   vic = drm_mode_cea_vic(connector, mode);
+   hdmi_vic = drm_mode_hdmi_vic(connector, mode);
 
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
 
@@ -5144,11 +5157,15 @@ drm_hdmi_avi_infoframe_from_display_mode(struct 
hdmi_avi_infoframe *frame,
 
/*
 * Populate picture aspect ratio from either
-* user input (if specified) or from the CEA mode list.
+* user input (if specified) or from the CEA/HDMI mode lists.
 */
picture_aspect = mode->picture_aspect_ratio;
-   if (picture_aspect == HDMI_PICTURE_ASPECT_NONE)
-   picture_aspect = drm_get_cea_aspect_ratio(frame->video_code);
+   if (picture_aspect == HDMI_PICTURE_ASPECT_NONE) {
+   if (vic)
+   picture_aspect = drm_get_cea_aspect_ratio(vic);
+   else if (hdmi_vic)
+   picture_aspect = drm_get_hdmi_aspect_ratio(hdmi_vic);
+   }

[PATCH 2/2] drm/edid: Add alternate clock for SMPTE 4K

2019-11-11 Thread Wayne Lin
[Why]
In hdmi_mode_alternate_clock(), there is an exception for the VIC 4
mode (4096x2160@24) because no alternate clock is defined for that
mode in HDMI 1.4b. However, HDMI 2.0 adds 23.98Hz for that mode.

[How]
Remove the exception

Signed-off-by: Wayne Lin 
---
 drivers/gpu/drm/drm_edid.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fcd7ae29049d..ed2782c53a93 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3126,9 +3126,6 @@ static enum hdmi_picture_aspect 
drm_get_hdmi_aspect_ratio(const u8 video_code)
 static unsigned int
 hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
 {
-   if (hdmi_mode->vdisplay == 4096 && hdmi_mode->hdisplay == 2160)
-   return hdmi_mode->clock;
-
return cea_mode_alternate_clock(hdmi_mode);
 }
 
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/amd/powerplay: issue BTC on Navi during SMU setup

2019-11-11 Thread Evan Quan
RunBTC is added for Navi ASIC on hardware setup.

Change-Id: I1c04b481ed14d5f12c20b7b0d592b62a65889e4a
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 010be21bee5b..433acb0f459d 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -1652,6 +1652,16 @@ static int navi10_update_pcie_parameters(struct 
smu_context *smu,
return ret;
 }
 
+static int navi10_run_btc(struct smu_context *smu)
+{
+   int ret = 0;
+
+   ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+   if (ret)
+   pr_err("RunBtc failed!\n");
+
+   return ret;
+}
 
 static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
@@ -1741,6 +1751,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+   .run_btc = navi10_run_btc,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

2019-11-11 Thread Deng, Emily
Hi Christian,
I added the following print in the function drm_sched_cleanup_jobs. From the log it
shows that using cancel_delayed_work alone could not avoid freeing the job when the
sched is in reset. But I don't know exactly where the driver goes wrong.
Do you have any suggestion about this?

+   printk("Emily:drm_sched_cleanup_jobs:begin,tid:%lu, pid:%lu\n", 
current->tgid, current->pid);

/*
 * Don't destroy jobs while the timeout worker is running  OR thread
 * is being parked and hence assumed to not touch ring_mirror_list
 */
 if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!cancel_delayed_work(&sched->work_tdr)))
return;
+   printk("Emily:drm_sched_cleanup_jobs,tid:%lu, pid:%lu\n", 
current->tgid, current->pid);


Best wishes
Emily Deng

Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11380.695091] 
Emily:drm_sched_cleanup_jobs:begin,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11380.695104] 
Emily:drm_sched_cleanup_jobs:begin,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11380.695105] 
Emily:drm_sched_cleanup_jobs,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11380.695107] 
Emily:drm_sched_cleanup_jobs:begin,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11380.695107] 
Emily:drm_sched_cleanup_jobs,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.222954] 
[drm:amdgpu_job_timedout [amdgpu]] *ERROR* ring sdma0 timeout, signaled 
seq=78585, emitted seq=78587
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.224275] 
[drm:amdgpu_job_timedout [amdgpu]] *ERROR* Process information: process  pid 0 
thread  pid 0, s_job:fe75ab36,tid=15603, pid=15603
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225413] 
amdgpu :00:08.0: GPU reset begin!
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225417] 
Emily:drm_sched_cleanup_jobs:begin,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225425] 
Emily:drm_sched_cleanup_jobs:begin,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225425] 
Emily:drm_sched_cleanup_jobs,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225428] 
Emily:amdgpu_job_free_cb,Process information: process  pid 0 thread  pid 0, 
s_job:fe75ab36, tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225429] 
Emily:drm_sched_cleanup_jobs:begin,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225430] 
Emily:drm_sched_cleanup_jobs,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225473] 
Emily:drm_sched_cleanup_jobs:begin,tid:2253, pid:2253
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225486] 
Emily:drm_sched_cleanup_jobs:begin,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225489] 
Emily:drm_sched_cleanup_jobs,tid:2262, pid:2262
Nov 12 12:58:20 ubuntu-drop-August-2018-rc2-gpu0-vf02 kernel: [11381.225494] 
Emily:amdgpu_job_free_cb,Process information: process  pid 0 thread  pid 0, 
s_job:f086ec84, tid:2262, pid:2262
>-Original Message-
>From: Grodzovsky, Andrey 
>Sent: Tuesday, November 12, 2019 11:28 AM
>To: Koenig, Christian ; Deng, Emily
>; amd-gfx@lists.freedesktop.org
>Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr
>
>Thinking more about this claim - we assume here that if cancel_delayed_work
>returned true it guarantees that timeout work is not running but, it merely
>means there was a pending timeout work which was removed from the
>workqueue before it's timer elapsed and so it didn't have a chance to be
>dequeued and executed, it doesn't cover already executing work. So there is a
>possibility where while timeout work started executing another timeout work
>already got enqueued (maybe through earlier cleanup jobs or through
>drm_sched_fault) and if at this point another drm_sched_cleanup_jobs runs
>cancel_delayed_work(&sched->work_tdr) will return true even while there is a
>timeout job in progress.
>Unfortunately we cannot change cancel_delayed_work to
>cancel_delayed_work_sync to flush the timeout work as timeout work itself
>waits for schedule thread  to be parked again when calling park_thread.
>
>Andrey
>
>
>From: amd-gfx  on behalf of
>Koenig, Christian 
>Sent: 08 November 2019 05:35:18
>To: Deng, Emily; amd-gfx@lists.freedesktop.org
>Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr
>
>Hi Emily,
>
>exactly that can't happen. See here:
>
>> /* Don't destroy jobs while the timeout worker is running */
>> if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&

RE: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

2019-11-11 Thread Deng, Emily
Hi Andrey,
 On my side, it doesn't need a specific scenario; I only run the quark
with a slow job. Then sometimes it will have a fake hang and the hardware fence will
come back. In this case, the NULL pointer issue will randomly occur in
amdgpu_device_gpu_recover.

>-Original Message-
>From: Grodzovsky, Andrey 
>Sent: Tuesday, November 12, 2019 5:35 AM
>To: Deng, Emily ; Koenig, Christian
>; amd-gfx@lists.freedesktop.org
>Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr
>
>Emily - is there a particular scenario to reproduce this ? I am trying with 
>libdrm
>deadlock test and artificially delaying the GPU reset logic until after the 
>guilty
>job is signaling but indeed nothing bad happens as drm_sched_cleanup_jobs
>returns early because there is a reset in progress and so the bad job is not
>getting released while GPU reset is running.
>
>Can you provide event tracing for timer, dma_fence and gpu_scheduler for
>when the problem happens ?
>
>Andrey
>
>On 11/11/19 4:05 AM, Deng, Emily wrote:
>> Hi Christian and Andrey,
>>   The issue I encountered is the bad job is freeing after entering to the
>amdgpu_device_gpu_recover. Don't know why, as per Christian said, it will
>call cancel_delayed_work in drm_sched_cleanup_jobs.
>>
>> Best wishes
>> Emily Deng
>>
>>
>>
>>> -Original Message-
>>> From: amd-gfx  On Behalf Of
>>> Deng, Emily
>>> Sent: Monday, November 11, 2019 3:19 PM
>>> To: Grodzovsky, Andrey ; Koenig,
>Christian
>>> ; amd-gfx@lists.freedesktop.org
>>> Subject: RE: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr
>>>
>>> Hi Andrey,
>>> I don’t think your patch will help for this. As it will may call
>>> kthread_should_park in drm_sched_cleanup_jobs first, and then call
>>> kcl_kthread_park. And then it still has a race between the 2 threads.
>>>
>>> Best wishes
>>> Emily Deng
>>>
>>>
>>>
 -Original Message-
 From: Grodzovsky, Andrey 
 Sent: Saturday, November 9, 2019 3:01 AM
 To: Koenig, Christian ; Deng, Emily
 ; amd-gfx@lists.freedesktop.org
 Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr


 On 11/8/19 5:35 AM, Koenig, Christian wrote:
> Hi Emily,
>
> exactly that can't happen. See here:
>
>>       /* Don't destroy jobs while the timeout worker is
>> running */
>>       if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
>>       !cancel_delayed_work(&sched->work_tdr))
>>       return NULL;
> We never free jobs while the timeout working is running to prevent
> exactly that issue.

 I don't think this protects us if drm_sched_cleanup_jobs is called
 for scheduler which didn't experience a timeout, in
 amdgpu_device_gpu_recover we access
 sched->ring_mirror_list for all the schedulers on a device so this
 sched->condition
 above won't protect us. What in fact could help maybe is my recent
 patch
 541c521 drm/sched: Avoid job cleanup if sched thread is parked.
 because we do park each of the scheduler threads during tdr job
 before trying to access
 sched->ring_mirror_list.

 Emily - did you see this problem with that patch in place ? I only
 pushed it yesterday.

 Andrey


> Regards,
> Christian.
>
> Am 08.11.19 um 11:32 schrieb Deng, Emily:
>> Hi Christian,
>> The drm_sched_job_timedout-> amdgpu_job_timedout call
 amdgpu_device_gpu_recover. I mean the main scheduler free the jobs
 while in amdgpu_device_gpu_recover, and before calling
>drm_sched_stop.
>> Best wishes
>> Emily Deng
>>
>>
>>
>>> -Original Message-
>>> From: Koenig, Christian 
>>> Sent: Friday, November 8, 2019 6:26 PM
>>> To: Deng, Emily ; amd-
>>> g...@lists.freedesktop.org
>>> Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for
>>> tdr
>>>
>>> Hi Emily,
>>>
>>> well who is calling amdgpu_device_gpu_recover() in this case?
>>>
>>> When it's not the scheduler we shouldn't have a guilty job in the
>>> first
>>> place.
>>> Regards,
>>> Christian.
>>>
>>> Am 08.11.19 um 11:22 schrieb Deng, Emily:
 Hi Chrisitan,
  No, I am with the new branch and also has the patch.
 Even it are freed by
>>> main scheduler, how we could avoid main scheduler to free jobs
>>> while enter to function amdgpu_device_gpu_recover?
 Best wishes
 Emily Deng



> -Original Message-
> From: Koenig, Christian 
> Sent: Friday, November 8, 2019 6:15 PM
> To: Deng, Emily ;
> amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for
> tdr
>
> Hi Emily,
>
> in this case you are on an old code branch.
>
> Jobs are freed now by the main scheduler threa

Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

2019-11-11 Thread Grodzovsky, Andrey
Thinking more about this claim - we assume here that if cancel_delayed_work 
returned true it guarantees that timeout work is not running but, it merely 
means there was a pending timeout work which was removed from the workqueue 
before it's timer elapsed and so it didn't have a chance to be dequeued and 
executed, it doesn't cover already executing work. So there is a possibility 
where while timeout work started executing another timeout work already got 
enqueued (maybe through earlier cleanup jobs or through drm_sched_fault) and if 
at this point another drm_sched_cleanup_jobs runs 
cancel_delayed_work(&sched->work_tdr) will return true even while there is a 
timeout job in progress.
Unfortunately we cannot change cancel_delayed_work to cancel_delayed_work_sync 
to flush the timeout work as timeout work itself waits for schedule thread  to 
be parked again when calling park_thread.

Andrey


From: amd-gfx  on behalf of Koenig, 
Christian 
Sent: 08 November 2019 05:35:18
To: Deng, Emily; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

Hi Emily,

exactly that can't happen. See here:

> /* Don't destroy jobs while the timeout worker is running */
> if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
> !cancel_delayed_work(&sched->work_tdr))
> return NULL;

We never free jobs while the timeout working is running to prevent
exactly that issue.

Regards,
Christian.

Am 08.11.19 um 11:32 schrieb Deng, Emily:
> Hi Christian,
>   The drm_sched_job_timedout-> amdgpu_job_timedout call 
> amdgpu_device_gpu_recover. I mean the main scheduler free the jobs while in 
> amdgpu_device_gpu_recover, and before calling drm_sched_stop.
>
> Best wishes
> Emily Deng
>
>
>
>> -Original Message-
>> From: Koenig, Christian 
>> Sent: Friday, November 8, 2019 6:26 PM
>> To: Deng, Emily ; amd-gfx@lists.freedesktop.org
>> Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr
>>
>> Hi Emily,
>>
>> well who is calling amdgpu_device_gpu_recover() in this case?
>>
>> When it's not the scheduler we shouldn't have a guilty job in the first 
>> place.
>>
>> Regards,
>> Christian.
>>
>> Am 08.11.19 um 11:22 schrieb Deng, Emily:
>>> Hi Chrisitan,
>>>No, I am with the new branch and also has the patch. Even it are 
>>> freed by
>> main scheduler, how we could avoid main scheduler to free jobs while enter
>> to function amdgpu_device_gpu_recover?
>>> Best wishes
>>> Emily Deng
>>>
>>>
>>>
 -Original Message-
 From: Koenig, Christian 
 Sent: Friday, November 8, 2019 6:15 PM
 To: Deng, Emily ; amd-gfx@lists.freedesktop.org
 Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

 Hi Emily,

 in this case you are on an old code branch.

 Jobs are freed now by the main scheduler thread and only if no
 timeout handler is running.

 See this patch here:
> commit 5918045c4ed492fb5813f980dcf89a90fefd0a4e
> Author: Christian König 
> Date:   Thu Apr 18 11:00:21 2019 -0400
>
>   drm/scheduler: rework job destruction
 Regards,
 Christian.

 Am 08.11.19 um 11:11 schrieb Deng, Emily:
> Hi Christian,
> Please refer to follow log, when it enter to
> amdgpu_device_gpu_recover
 function, the bad job 5086879e is freeing in function
 amdgpu_job_free_cb  at the same time, because of the hardware fence
>> signal.
 But amdgpu_device_gpu_recover goes faster, at this case, the s_fence
 is already freed, but job is not freed in time. Then this issue occurs.
> [  449.792189] [drm:amdgpu_job_timedout [amdgpu]] *ERROR* ring
>> sdma0
> timeout, signaled seq=2481, emitted seq=2483 [  449.793202]
> [drm:amdgpu_job_timedout [amdgpu]] *ERROR* Process information:
 process  pid 0 thread  pid 0, s_job:5086879e [  449.794163]
 amdgpu
 :00:08.0: GPU reset begin!
> [  449.794175] Emily:amdgpu_job_free_cb,Process information: process
> pid 0 thread  pid 0, s_job:5086879e [  449.794221]
> Emily:amdgpu_job_free_cb,Process information: process  pid 0 thread
> pid 0, s_job:66eb74ab [  449.794222]
> Emily:amdgpu_job_free_cb,Process information: process  pid 0 thread
> pid 0, s_job:d4438ad9 [  449.794255]
> Emily:amdgpu_job_free_cb,Process information: process  pid 0 thread
> pid 0, s_job:b6d69c65 [  449.794257]
> Emily:amdgpu_job_free_cb,Process information: process  pid 0 thread
> pid 0,
 s_job:ea85e922 [  449.794287]
 Emily:amdgpu_job_free_cb,Process
 information: process  pid 0 thread  pid 0, s_job:ed3a5ac6 [
 449.794366] BUG: unable to handle kernel NULL pointer dereference at
 00c0 [  449.800818] PGD 0 P4D 0 [  449.801040] Oops: 
 [#1] SMP PTI
> [  449.801338] CPU: 3 PID: 55 Comm: kworker/3:1 Tain

[PATCH] drm/amd/powerplay: avoid DPM reenable process on Navi1x ASICs V2

2019-11-11 Thread Evan Quan
Otherwise, without RLC reinitialization, the DPM reenablement
will fail. That affects the custom pptable uploading.

V2: setting/clearing uploading_custom_pp_table in
smu_sys_set_pp_table()

Change-Id: I6fe2ed5ce23f2a5b66f371c0b6d1f924837e5af6
Reported-by: Matt Coffin 
Signed-off-by: Evan Quan 
Tested-by: Matt Coffin 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 31 ---
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  1 +
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 76a4154b3be2..54c21f5a1861 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -591,10 +591,18 @@ int smu_sys_set_pp_table(struct smu_context *smu,  void 
*buf, size_t size)
smu_table->power_play_table = smu_table->hardcode_pptable;
smu_table->power_play_table_size = size;
 
+   /*
+* Special hw_fini action(for Navi1x, the DPMs disablement will be
+* skipped) may be needed for custom pptable uploading.
+*/
+   smu->uploading_custom_pp_table = true;
+
ret = smu_reset(smu);
if (ret)
pr_info("smu reset failed, ret = %d\n", ret);
 
+   smu->uploading_custom_pp_table = false;
+
 failed:
mutex_unlock(&smu->mutex);
return ret;
@@ -1293,10 +1301,25 @@ static int smu_hw_fini(void *handle)
return ret;
}
 
-   ret = smu_stop_dpms(smu);
-   if (ret) {
-   pr_warn("Fail to stop Dpms!\n");
-   return ret;
+   /*
+* For custom pptable uploading, skip the DPM features
+* disable process on Navi1x ASICs.
+*   - As the gfx related features are under control of
+* RLC on those ASICs. RLC reinitialization will be
+* needed to reenable them. That will cost much more
+* efforts.
+*
+*   - SMU firmware can handle the DPM reenablement
+* properly.
+*/
+   if (!smu->uploading_custom_pp_table ||
+   !((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))) {
+   ret = smu_stop_dpms(smu);
+   if (ret) {
+   pr_warn("Fail to stop Dpms!\n");
+   return ret;
+   }
}
 
kfree(table_context->driver_pptable);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 8120e7587585..215841f5fb93 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -391,6 +391,7 @@ struct smu_context
 
uint32_t smc_if_version;
 
+   bool uploading_custom_pp_table;
 };
 
 struct i2c_adapter;
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amd/powerplay: avoid DPM reenable process on Navi1x ASICs

2019-11-11 Thread Quan, Evan


> -Original Message-
> From: Alex Deucher 
> Sent: Tuesday, November 12, 2019 1:34 AM
> To: Quan, Evan 
> Cc: amd-gfx list 
> Subject: Re: [PATCH] drm/amd/powerplay: avoid DPM reenable process on
> Navi1x ASICs
> 
> On Mon, Nov 11, 2019 at 4:25 AM Evan Quan  wrote:
> >
> > Otherwise, without RLC reinitialization, the DPM reenablement will
> > fail. That affects the custom pptable uploading.
> >
> > Change-Id: I6fe2ed5ce23f2a5b66f371c0b6d1f924837e5af6
> > Signed-off-by: Evan Quan 
> > ---
> >  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 32 +++--
> --
> >  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  1 +
> >  2 files changed, 26 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > index 76a4154b3be2..a4d67b30fd72 100644
> > --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > @@ -1293,10 +1293,25 @@ static int smu_hw_fini(void *handle)
> > return ret;
> > }
> >
> > -   ret = smu_stop_dpms(smu);
> > -   if (ret) {
> > -   pr_warn("Fail to stop Dpms!\n");
> > -   return ret;
> > +   /*
> > +* For custom pptable uploading, skip the DPM features
> > +* disable process on Navi1x ASICs.
> > +*   - As the gfx related features are under control of
> > +* RLC on those ASICs. RLC reinitialization will be
> > +* needed to reenable them. That will cost much more
> > +* efforts.
> > +*
> > +*   - SMU firmware can handle the DPM reenablement
> > +* properly.
> > +*/
> > +   if (!smu->uploading_custom_pp_table ||
> > +   !((adev->asic_type >= CHIP_NAVI10) &&
> > + (adev->asic_type <= CHIP_NAVI12))) {
> > +   ret = smu_stop_dpms(smu);
> > +   if (ret) {
> > +   pr_warn("Fail to stop Dpms!\n");
> > +   return ret;
> > +   }
> > }
> >
> > kfree(table_context->driver_pptable);
> > @@ -1324,13 +1339,16 @@ int smu_reset(struct smu_context *smu)
> > struct amdgpu_device *adev = smu->adev;
> > int ret = 0;
> >
> > +   smu->uploading_custom_pp_table = true;
> > +
> 
> Do we need to differentiate between reloading for a pptable update and
> reloading for a gpu reset or suspend/resume or is that already handled?
[Quan, Evan] That was considered(and as I verified suspend/resume was fine).  
For gpu reset or suspend/resume, the rlc/gfx will be reinitialized.
So, under those cases, it's OK to perform the dpms disablement.
> Shouldn't we be setting/clearing uploading_custom_pp_table in
> smu_sys_set_pp_table() around the call to smu_reset()?
[Quan, Evan] That's a good point. Will update this in V2.
> 
> Alex
> 
> > ret = smu_hw_fini(adev);
> > if (ret)
> > -   return ret;
> > +   goto out;
> >
> > ret = smu_hw_init(adev);
> > -   if (ret)
> > -   return ret;
> > +
> > +out:
> > +   smu->uploading_custom_pp_table = false;
> >
> > return ret;
> >  }
> > diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > index 8120e7587585..215841f5fb93 100644
> > --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > @@ -391,6 +391,7 @@ struct smu_context
> >
> > uint32_t smc_if_version;
> >
> > +   bool uploading_custom_pp_table;
> >  };
> >
> >  struct i2c_adapter;
> > --
> > 2.24.0
> >
> > ___
> > amd-gfx mailing list
> > amd-gfx@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 30/30] drm/amd/display: Add Navi10 DMUB VBIOS code

2019-11-11 Thread Rodrigo Siqueira
From: Nicholas Kazlauskas 

[Why]
We need some extra dmub_cmd_type for NV10

[How]
Add command table functions in DMUB firmware.

Signed-off-by: Nicholas Kazlauskas 
Signed-off-by: Xiong Yan 
Reviewed-by: Tony Cheng 
Acked-by: Nicholas Kazlauskas 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index b25f92e3280d..43f1cd647aab 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -45,6 +45,17 @@ enum dmub_cmd_type {
DMUB_CMD__ENABLE_DISP_POWER_GATING,
DMUB_CMD__DPPHY_INIT,
DMUB_CMD__DIG1_TRANSMITTER_CONTROL,
+   DMUB_CMD__SETUP_DISPLAY_MODE,
+   DMUB_CMD__BLANK_CRTC,
+   DMUB_CMD__ENABLE_DISPPATH,
+   DMUB_CMD__DISABLE_DISPPATH,
+   DMUB_CMD__DISABLE_DISPPATH_OUTPUT,
+   DMUB_CMD__READ_DISPPATH_EDID,
+   DMUB_CMD__DP_PRE_LINKTRAINING,
+   DMUB_CMD__INIT_CONTROLLER,
+   DMUB_CMD__RESET_CONTROLLER,
+   DMUB_CMD__SET_BRI_LEVEL,
+   DMUB_CMD__LVTMA_CONTROL,
 
// PSR
DMUB_CMD__PSR_ENABLE,
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 19/30] drm/amd/display: Adjust DML workaround threshold

2019-11-11 Thread Rodrigo Siqueira
From: Joshua Aberback 

[Why]
There is a case where the margin is between 50 and 60, but applying the
workaround causes a hang. By increasing the threshold, we are blocking more
cases from switching p-state during active, but those cases will fall back
to switching during blank, which is fine.

[How]
 - increase required margin from 50 to 60

Signed-off-by: Joshua Aberback 
Reviewed-by: Aric Cyr 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c  | 2 +-
 .../gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 77b7574c63cb..3b224b155e8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -2578,7 +2578,7 @@ static void 
dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
+ mode_lib->vba.DRAMClockChangeLatency;
 
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
-   mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+   mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = 
dm_dram_clock_change_vactive;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 62dfd36d830a..6482d7b99bae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2612,7 +2612,7 @@ static void 
dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
+ mode_lib->vba.DRAMClockChangeLatency;
 
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
-   mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+   mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = 
dm_dram_clock_change_vactive;
} else if (mode_lib->vba.DummyPStateCheck &&
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 29/30] drm/amd/display: Add DSC 422Native debug option

2019-11-11 Thread Rodrigo Siqueira
From: Ilya Bakoulin 

[Why]
Need to be able to enable native 422 for debugging purposes.

[How]
Add new dc_debug_options bool and check it in the get_dsc_enc_caps
function.

Signed-off-by: Ilya Bakoulin 
Reviewed-by: Charlene Liu 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 1 +
 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 6 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index f30c77e44bb4..3e6133f8cdc4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -362,6 +362,7 @@ struct dc_debug_options {
bool disable_hubp_power_gate;
bool disable_dsc_power_gate;
int dsc_min_slice_height_override;
+   bool native422_support;
bool disable_pplib_wm_range;
enum wm_report_mode pplib_wm_report_mode;
unsigned int min_disp_clk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c 
b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index dabd3b7a4cdc..ec86ba73a039 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -25,6 +25,7 @@
 #include "dc_hw_types.h"
 #include "dsc.h"
 #include 
+#include "dc.h"
 
 struct dc_dsc_policy {
bool use_min_slices_h;
@@ -236,8 +237,11 @@ static void get_dsc_enc_caps(
// This is a static HW query, so we can use any DSC
 
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
-   if (dsc)
+   if (dsc) {
dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+   if (dsc->ctx->dc->debug.native422_support)
+   dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+   }
 }
 
 /* Returns 'false' if no intersection was found for at least one capability.
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 28/30] drm/amd/display: Use a temporary copy of the current state when updating DSC config

2019-11-11 Thread Rodrigo Siqueira
From: Nikola Cornij 

[why]
When updating DSC config, a new config has to be validated before proceeding
with applying the update. Validation, however, modifies the current state.
This means DSC config validation would affect pipe re-assignment, causing
intermittent screen corruption issues when ODM is required for DSC.

[how]
- Use a copy of the current state for modified DSC config validation
- Set the update type to FULL_UPDATE to correctly validate and set the
  actual state used for committing the streams

Signed-off-by: Nikola Cornij 
Reviewed-by: Dmytro Laktyushkin 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 31 ++--
 1 file changed, 24 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 60bc4c3a518e..ed3fd41c0879 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1678,6 +1678,9 @@ static enum surface_update_type 
check_update_surfaces_for_stream(
 
if (stream_update->output_csc_transform || 
stream_update->output_color_space)
su_flags->bits.out_csc = 1;
+
+   if (stream_update->dsc_config)
+   overall_type = UPDATE_TYPE_FULL;
}
 
for (i = 0 ; i < surface_count; i++) {
@@ -1869,8 +1872,10 @@ static void copy_surface_update_to_plane(
 static void copy_stream_update_to_stream(struct dc *dc,
 struct dc_state *context,
 struct dc_stream_state *stream,
-const struct dc_stream_update *update)
+struct dc_stream_update *update)
 {
+   struct dc_context *dc_ctx = dc->ctx;
+
if (update == NULL || stream == NULL)
return;
 
@@ -1947,12 +1952,24 @@ static void copy_stream_update_to_stream(struct dc *dc,
uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
   update->dsc_config->num_slices_v != 0);
 
-   stream->timing.dsc_cfg = *update->dsc_config;
-   stream->timing.flags.DSC = enable_dsc;
-   if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
-true)) {
-   stream->timing.dsc_cfg = old_dsc_cfg;
-   stream->timing.flags.DSC = old_dsc_enabled;
+   /* Use temporary context for validating new DSC config */
+   struct dc_state *dsc_validate_context = dc_create_state(dc);
+
+   if (dsc_validate_context) {
+   dc_resource_state_copy_construct(dc->current_state, 
dsc_validate_context);
+
+   stream->timing.dsc_cfg = *update->dsc_config;
+   stream->timing.flags.DSC = enable_dsc;
+   if (!dc->res_pool->funcs->validate_bandwidth(dc, 
dsc_validate_context, true)) {
+   stream->timing.dsc_cfg = old_dsc_cfg;
+   stream->timing.flags.DSC = old_dsc_enabled;
+   update->dsc_config = false;
+   }
+
+   dc_release_state(dsc_validate_context);
+   } else {
+   DC_ERROR("Failed to allocate new validate context for 
DSC change\n");
+   update->dsc_config = false;
}
}
 }
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 26/30] drm/amd/display: DML Validation Dump/Check with Logging

2019-11-11 Thread Rodrigo Siqueira
From: Jaehyun Chung 

[Why]
Need validation that we are programming the expected values (rq, ttu, dlg)
from DML. This debug feature will output logs if we are programming
incorrect values and may help differentiate DAL issues from HW issues.

[How]
Dump relevant registers for each pipe with active stream. Compare current
reg values with the converted DML output. Log mismatches when found.

Signed-off-by: Jaehyun Chung 
Reviewed-by: Alvin Lee 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  |  18 +-
 drivers/gpu/drm/amd/display/dc/dc.h   |   1 +
 .../gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 310 
 .../gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 345 ++
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  |   7 +
 5 files changed, 680 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7c04397eb7aa..60bc4c3a518e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2163,8 +2163,24 @@ static void commit_planes_for_stream(struct dc *dc,
dc, pipe_ctx->stream, 
stream_status->plane_count, context);
}
}
-   if (dc->hwss.program_front_end_for_ctx && update_type != 
UPDATE_TYPE_FAST)
+   if (dc->hwss.program_front_end_for_ctx && update_type != 
UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+   if (dc->debug.validate_dml_output) {
+   for (i = 0; i < dc->res_pool->pipe_count; i++) {
+   struct pipe_ctx cur_pipe = 
context->res_ctx.pipe_ctx[i];
+   if (cur_pipe.stream == NULL)
+   continue;
+
+   
cur_pipe.plane_res.hubp->funcs->validate_dml_output(
+   cur_pipe.plane_res.hubp, 
dc->ctx,
+   
&context->res_ctx.pipe_ctx[i].rq_regs,
+   
&context->res_ctx.pipe_ctx[i].dlg_regs,
+   
&context->res_ctx.pipe_ctx[i].ttu_regs);
+   }
+   }
+#endif
+   }
 
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 3cb361917b4b..f30c77e44bb4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -412,6 +412,7 @@ struct dc_debug_options {
 
bool nv12_iflip_vm_wa;
bool disable_dram_clock_change_vactive_support;
+   bool validate_dml_output;
 };
 
 struct dc_debug_data {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 7d9ffb81584a..2823be75b071 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -30,6 +30,8 @@
 #include "reg_helper.h"
 #include "basics/conversion.h"
 
+#define DC_LOGGER_INIT(logger)
+
 #define REG(reg)\
hubp2->hubp_regs->reg
 
@@ -1244,6 +1246,313 @@ void hubp2_read_state(struct hubp *hubp)
 
 }
 
+void hubp2_validate_dml_output(struct hubp *hubp,
+   struct dc_context *ctx,
+   struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+   struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+   struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
+{
+   struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+   struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
+   struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
+   struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
+   DC_LOGGER_INIT(ctx->logger);
+
+   /* Requestor Regs */
+   REG_GET(HUBPRET_CONTROL,
+   DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
+   REG_GET_4(DCN_EXPANSION_MODE,
+   DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
+   PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
+   MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
+   CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
+   REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+   CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
+   MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
+   META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
+   MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
+   DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
+   MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
+   SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
+   PTE_ROW_HEIGHT_LINEAR, 
&rq_regs.rq_regs_l.pte_row_height_linear);
+   REG_GET_8(DCH

[PATCH 13/30] drm/amd/display: Connect DIG FE to its BE before link training starts

2019-11-11 Thread Rodrigo Siqueira
From: Nikola Cornij 

[why]
In SST mode no idle pattern will be generated after link training if
DIG FE is not connected to DIG BE.

Signed-off-by: Nikola Cornij 
Reviewed-by: Tony Cheng 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index bdc8be373ff0..1e1f461cbb3c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1546,6 +1546,10 @@ static enum dc_status enable_link_dp(
panel_mode = dp_get_panel_mode(link);
dp_set_panel_mode(link, panel_mode);
 
+   /* We need to do this before the link training to ensure the idle 
pattern in SST
+* mode will be sent right after the link training */
+   link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+   
pipe_ctx->stream_res.stream_enc->id, true);
skip_video_pattern = true;
 
if (link_settings.link_rate == LINK_RATE_LOW)
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 08/30] drm/amd/display: Fix stereo with DCC enabled

2019-11-11 Thread Rodrigo Siqueira
From: Samson Tam 

[Why]
When sending DCC with Stereo, DCC gets enabled but the meta addresses
are 0. This happens momentarily before the meta addresses are populated
with a valid address.

[How]
Add call validate_dcc_with_meta_address() in
copy_surface_update_to_plane() to check for surface address and DCC
change.
When DCC has changed, check if DCC enable is true but meta address is 0.
If so, we turn DCC enable to false. When surface address has changed, we
check if DCC enable is false but meta address is not 0. If so, we turn
DCC enable back to true.  This will restore DCC enable to the proper
setting once the meta address is valid.

Signed-off-by: Samson Tam 
Reviewed-by: Jun Lei 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 27 
 1 file changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1fdba13b3d0f..ffc8b1f89690 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1748,12 +1748,37 @@ static struct dc_stream_status *stream_get_status(
 
 static const enum surface_update_type update_surface_trace_level = 
UPDATE_TYPE_FULL;
 
+static void validate_dcc_with_meta_address(
+   struct dc_plane_dcc_param *dcc,
+   struct dc_plane_address *address)
+{
+   if ((address->grph.meta_addr.quad_part == 0) &&
+   dcc->enable) {
+   ASSERT(!dcc->enable);
+   dcc->enable = false;
+   } else if ((address->grph.meta_addr.quad_part != 0) &&
+   !dcc->enable)
+   dcc->enable = true;
+
+   if (address->type != PLN_ADDR_TYPE_GRAPHICS) {
+   if ((address->grph_stereo.right_meta_addr.quad_part == 0) &&
+   dcc->enable) {
+   ASSERT(!dcc->enable);
+   dcc->enable = false;
+   } else if ((address->grph_stereo.right_meta_addr.quad_part != 
0) &&
+   !dcc->enable)
+   dcc->enable = true;
+   }
+}
+
 static void copy_surface_update_to_plane(
struct dc_plane_state *surface,
struct dc_surface_update *srf_update)
 {
if (srf_update->flip_addr) {
surface->address = srf_update->flip_addr->address;
+   validate_dcc_with_meta_address(&surface->dcc, 
&surface->address);
+
surface->flip_immediate =
srf_update->flip_addr->flip_immediate;
surface->time.time_elapsed_in_us[surface->time.index] =
@@ -1802,6 +1827,8 @@ static void copy_surface_update_to_plane(
srf_update->plane_info->global_alpha_value;
surface->dcc =
srf_update->plane_info->dcc;
+   validate_dcc_with_meta_address(&surface->dcc, 
&surface->address);
+
surface->sdr_white_level =
srf_update->plane_info->sdr_white_level;
surface->layer_index =
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 18/30] drm/amd/display: add color space option when sending link test pattern

2019-11-11 Thread Rodrigo Siqueira
From: Wenjing Liu 

[why]
In the TEST_MISC dpcd register field definition, the test equipment
has the option to choose between YCbCr601 or YCbCr709.
We will apply corresponding YCbCr coefficient based on this test
request.

[how]
Add a new input parameter in dc_link_dp_set_test_pattern to allow the
selection between different color space.

Signed-off-by: Wenjing Liu 
Reviewed-by: Nikola Cornij 
Acked-by: Rodrigo Siqueira 
---
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |  1 +
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  2 +
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 39 +--
 drivers/gpu/drm/amd/display/dc/dc_dp_types.h  | 10 ++---
 drivers/gpu/drm/amd/display/dc/dc_link.h  |  2 +
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c|  9 -
 .../gpu/drm/amd/display/dc/dcn20/dcn20_opp.c  | 16 +++-
 .../gpu/drm/amd/display/dc/dcn20/dcn20_opp.h  |  1 +
 .../gpu/drm/amd/display/dc/inc/hw/hw_shared.h |  7 
 drivers/gpu/drm/amd/display/dc/inc/hw/opp.h   |  1 +
 .../amd/display/include/link_service_types.h  |  7 
 11 files changed, 85 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index bdb37e611015..f81d3439ee8c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -657,6 +657,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct 
file *f, const char __us
dc_link_set_test_pattern(
link,
test_pattern,
+   DP_TEST_PATTERN_COLOR_SPACE_RGB,
&link_training_settings,
custom_pattern,
10);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index c58ac31869df..790a6b4046e0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3334,6 +3334,7 @@ void dc_link_disable_hpd(const struct dc_link *link)
 
 void dc_link_set_test_pattern(struct dc_link *link,
  enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space 
test_pattern_color_space,
  const struct link_training_settings 
*p_link_settings,
  const unsigned char *p_custom_pattern,
  unsigned int cust_pattern_size)
@@ -3342,6 +3343,7 @@ void dc_link_set_test_pattern(struct dc_link *link,
dc_link_dp_set_test_pattern(
link,
test_pattern,
+   test_pattern_color_space,
p_link_settings,
p_custom_pattern,
cust_pattern_size);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 68749c205fa0..55f734cb82bb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2493,6 +2493,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link 
*link)
dc_link_dp_set_test_pattern(
link,
test_pattern,
+   DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
&link_training_settings,
test_80_bit_pattern,
(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
@@ -2504,6 +2505,8 @@ static void dp_test_send_link_test_pattern(struct dc_link 
*link)
union link_test_pattern dpcd_test_pattern;
union test_misc dpcd_test_params;
enum dp_test_pattern test_pattern;
+   enum dp_test_pattern_color_space test_pattern_color_space =
+   DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
 
memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
@@ -2538,9 +2541,14 @@ static void dp_test_send_link_test_pattern(struct 
dc_link *link)
break;
}
 
+   test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+   DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+   DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+
dc_link_dp_set_test_pattern(
link,
test_pattern,
+   test_pattern_color_space,
NULL,
NULL,
0);
@@ -3430,7 +3438,8 @@ static bool is_dp_phy_pattern(enum dp_test_pattern 
test_pattern)
 
 static void set_crtc_test_pattern(struct dc_link *link,
struct pipe_ctx *pipe_ctx,
-   enum dp_test_pattern test_pattern)
+   enum dp_test_pattern test_pattern,
+   enum dp_test_pattern_color_space 
test_pattern_color_space)
 {
enum controller_dp_t

[PATCH 11/30] drm/amd/display: Add DMUB param to load inst const from driver

2019-11-11 Thread Rodrigo Siqueira
From: Nicholas Kazlauskas 

[Why]
By default we shouldn't be trying to write secure registers during
DMUB hardware init.

[How]
Add a parameter to control whether we put the DMCUB into secure reset
and attempt to load CW0/CW1.

Signed-off-by: Nicholas Kazlauskas 
Reviewed-by: Tony Cheng 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 2 ++
 drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index 76e80138303b..046885940dba 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -281,12 +281,14 @@ struct dmub_srv_create_params {
  * @fb_base: base of the framebuffer aperture
  * @fb_offset: offset of the framebuffer aperture
  * @psp_version: psp version to pass for DMCU init
+ * @load_inst_const: true if DMUB should load inst const fw
  */
 struct dmub_srv_hw_params {
struct dmub_fb *fb[DMUB_WINDOW_TOTAL];
uint64_t fb_base;
uint64_t fb_offset;
uint32_t psp_version;
+   bool load_inst_const;
 };
 
 /**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 2d63ae80bda9..0dd32edbbcb3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -278,7 +278,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw1.region.base = DMUB_CW1_BASE;
cw1.region.top = cw1.region.base + stack_fb->size - 1;
 
-   if (dmub->hw_funcs.backdoor_load)
+   if (params->load_inst_const && dmub->hw_funcs.backdoor_load)
dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
}
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 23/30] drm/amd/display: Avoid conflict between HDR multiplier and 3dlut

2019-11-11 Thread Rodrigo Siqueira
From: Michael Strauss 

[WHY]
There can be a conflict between OS HDR multiplier and 3dlut HDR
multiplier, which are both sent to DC.

[HOW]
Instead of having dc determine which HDR multiplier to use, make the
decision in dm and send only the intended value in a surface update.
Store the current OS HDR multiplier and determine whether to use it or
the 3dlut's multiplier before sending the surface update to dc. Send
multiplier to dc in fixed31_32 format, dc then converts it to hw format.

Signed-off-by: Michael Strauss 
Reviewed-by: Krunoslav Kovac 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c| 17 ++---
 drivers/gpu/drm/amd/display/dc/dc.h |  9 -
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c   | 10 +++---
 .../gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c  | 10 +-
 4 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index d079ffadaeb4..04af2bf60073 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1469,11 +1469,6 @@ static enum surface_update_type 
get_plane_info_update_type(const struct dc_surfa
elevate_update_type(&update_type, UPDATE_TYPE_MED);
}
 
-   if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
-   update_flags->bits.sdr_white_level = 1;
-   elevate_update_type(&update_type, UPDATE_TYPE_MED);
-   }
-
if (u->plane_info->dcc.enable != u->surface->dcc.enable
|| u->plane_info->dcc.independent_64b_blks != 
u->surface->dcc.independent_64b_blks
|| u->plane_info->dcc.meta_pitch != 
u->surface->dcc.meta_pitch) {
@@ -1621,6 +1616,12 @@ static enum surface_update_type det_surface_update(const 
struct dc *dc,
update_flags->bits.gamma_change = 1;
}
 
+   if (u->hdr_mult.value)
+   if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+   update_flags->bits.hdr_mult = 1;
+   elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+   }
+
if (update_flags->bits.in_transfer_func_change) {
type = UPDATE_TYPE_MED;
elevate_update_type(&overall_type, type);
@@ -1802,8 +1803,6 @@ static void copy_surface_update_to_plane(
srf_update->plane_info->global_alpha_value;
surface->dcc =
srf_update->plane_info->dcc;
-   surface->sdr_white_level =
-   srf_update->plane_info->sdr_white_level;
surface->layer_index =
srf_update->plane_info->layer_index;
}
@@ -1848,6 +1847,10 @@ static void copy_surface_update_to_plane(
memcpy(surface->lut3d_func, srf_update->lut3d_func,
sizeof(*surface->lut3d_func));
 
+   if (srf_update->hdr_mult.value)
+   surface->hdr_mult =
+   srf_update->hdr_mult;
+
if (srf_update->blend_tf &&
(surface->blend_tf !=
srf_update->blend_tf))
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index da9cb7dd22e6..3cb361917b4b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -667,7 +667,7 @@ union dc_3dlut_state {
 struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
-   uint32_t hdr_multiplier;
+   struct fixed31_32 hdr_multiplier;
bool initialized; /*remove after diag fix*/
union dc_3dlut_state state;
struct dc_context *ctx;
@@ -694,7 +694,7 @@ union surface_update_flags {
uint32_t horizontal_mirror_change:1;
uint32_t per_pixel_alpha_change:1;
uint32_t global_alpha_change:1;
-   uint32_t sdr_white_level:1;
+   uint32_t hdr_mult:1;
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
@@ -738,7 +738,7 @@ struct dc_plane_state {
struct dc_bias_and_scale *bias_and_scale;
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
-   uint32_t sdr_white_level;
+   struct fixed31_32 hdr_mult;
 
// TODO: No longer used, remove
struct dc_hdr_static_metadata hdr_static_ctx;
@@ -783,7 +783,6 @@ struct dc_plane_info {
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
enum dc_color_space color_space;
-   unsigned int sdr_white_level;
bool horizontal_mirror;
bool visible;
bool per_pixel_alpha;
@@ -807,7 +806,7 @@ struct dc_surface_update {
const struct dc_flip_addrs *flip_addr;
   

[PATCH 21/30] drm/amd/display: 3.2.60

2019-11-11 Thread Rodrigo Siqueira
From: Aric Cyr 

Signed-off-by: Aric Cyr 
Reviewed-by: Aric Cyr 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 4c6c2fcc6a96..da9cb7dd22e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.59"
+#define DC_VER "3.2.60"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 17/30] drm/amd/display: cleanup of construct and destruct funcs

2019-11-11 Thread Rodrigo Siqueira
From: Anthony Koo 

[Why]
Too many construct functions, which makes searching
difficult, especially in some debuggers.

[How]
Append all construct and destruct functions with dcn
number and object type to make each construct function
name unique

Signed-off-by: Anthony Koo 
Reviewed-by: Aric Cyr 
Acked-by: Rodrigo Siqueira 
---
 .../gpu/drm/amd/display/dc/bios/bios_parser.c |  4 +--
 .../drm/amd/display/dc/bios/bios_parser2.c|  8 ++---
 drivers/gpu/drm/amd/display/dc/core/dc.c  | 10 +++---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  8 ++---
 .../gpu/drm/amd/display/dc/core/dc_link_ddc.c |  8 ++---
 drivers/gpu/drm/amd/display/dc/core/dc_sink.c |  8 ++---
 .../gpu/drm/amd/display/dc/core/dc_stream.c   |  8 ++---
 .../gpu/drm/amd/display/dc/core/dc_surface.c  |  8 ++---
 .../amd/display/dc/dce100/dce100_resource.c   | 10 +++---
 .../amd/display/dc/dce110/dce110_resource.c   | 10 +++---
 .../amd/display/dc/dce112/dce112_resource.c   | 10 +++---
 .../amd/display/dc/dce120/dce120_resource.c   | 10 +++---
 .../drm/amd/display/dc/dce80/dce80_resource.c | 10 +++---
 .../drm/amd/display/dc/dcn10/dcn10_resource.c | 10 +++---
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 10 +++---
 .../drm/amd/display/dc/dcn21/dcn21_resource.c | 10 +++---
 drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c  | 12 +++
 .../gpu/drm/amd/display/dc/gpio/hw_generic.c  | 23 -
 drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c  | 32 +--
 .../dc/irq/dce110/irq_service_dce110.c|  4 +--
 .../dc/irq/dce120/irq_service_dce120.c|  4 +--
 .../display/dc/irq/dce80/irq_service_dce80.c  |  4 +--
 .../display/dc/irq/dcn10/irq_service_dcn10.c  |  4 +--
 .../display/dc/irq/dcn20/irq_service_dcn20.c  |  4 +--
 .../display/dc/irq/dcn21/irq_service_dcn21.c  |  4 +--
 25 files changed, 104 insertions(+), 129 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 823843cd2613..6e57a1be0839 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -111,7 +111,7 @@ struct dc_bios *bios_parser_create(
return NULL;
 }
 
-static void destruct(struct bios_parser *bp)
+static void bios_parser_destruct(struct bios_parser *bp)
 {
kfree(bp->base.bios_local_image);
kfree(bp->base.integrated_info);
@@ -126,7 +126,7 @@ static void bios_parser_destroy(struct dc_bios **dcb)
return;
}
 
-   destruct(bp);
+   bios_parser_destruct(bp);
 
kfree(bp);
*dcb = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 6e29ba8e582e..bfd444a1e8ed 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -111,7 +111,7 @@ static struct atom_encoder_caps_record 
*get_encoder_cap_record(
 
 #define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
 
-static void destruct(struct bios_parser *bp)
+static void bios_parser2_destruct(struct bios_parser *bp)
 {
kfree(bp->base.bios_local_image);
kfree(bp->base.integrated_info);
@@ -126,7 +126,7 @@ static void firmware_parser_destroy(struct dc_bios **dcb)
return;
}
 
-   destruct(bp);
+   bios_parser2_destruct(bp);
 
kfree(bp);
*dcb = NULL;
@@ -1927,7 +1927,7 @@ static const struct dc_vbios_funcs vbios_funcs = {
.get_board_layout_info = bios_get_board_layout_info,
 };
 
-static bool bios_parser_construct(
+static bool bios_parser2_construct(
struct bios_parser *bp,
struct bp_init_data *init,
enum dce_version dce_version)
@@ -2020,7 +2020,7 @@ struct dc_bios *firmware_parser_create(
if (!bp)
return NULL;
 
-   if (bios_parser_construct(bp, init, dce_version))
+   if (bios_parser2_construct(bp, init, dce_version))
return &bp->base;
 
kfree(bp);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1fdba13b3d0f..d079ffadaeb4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -533,7 +533,7 @@ void dc_stream_set_static_screen_events(struct dc *dc,
dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, 
events);
 }
 
-static void destruct(struct dc *dc)
+static void dc_destruct(struct dc *dc)
 {
if (dc->current_state) {
dc_release_state(dc->current_state);
@@ -579,7 +579,7 @@ static void destruct(struct dc *dc)
 
 }
 
-static bool construct(struct dc *dc,
+static bool dc_construct(struct dc *dc,
const struct dc_init_data *init_params)
 {
struct dc_context *dc_ctx;
@@ -729,7 +729,7 @@ static bool construct(struct dc *dc,
 
 fail:
 
-   destruct(dc);
+   dc_destruct(dc);
return false;
 }
 
@@ -795,7 +79

[PATCH 24/30] drm/amd/display: Don't spin forever waiting for DMCUB phy/auto init

2019-11-11 Thread Rodrigo Siqueira
From: Nicholas Kazlauskas 

[Why]
It's an interface violation to use infinite loops within DMUB
service functions and we'll lock up the kernel by doing so.

[How]
Revert the function back to its intended functionality.
Move the infinite loops into DC/DM as necessary.

Signed-off-by: Nicholas Kazlauskas 
Reviewed-by: Sun peng Li 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c| 6 --
 drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 9 ++---
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c 
b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 61cefe0a3790..74ffe53eb49d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -112,8 +112,10 @@ void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv 
*dc_dmub_srv)
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
enum dmub_status status;
 
-   status = dmub_srv_wait_for_phy_init(dmub, 100);
-   if (status != DMUB_STATUS_OK)
+   status = dmub_srv_wait_for_phy_init(dmub, 1000);
+   if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB phy init: status=%d\n",
 status);
+   ASSERT(0);
+   }
 }
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 5ae1906ff1b1..60c574a39c6a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -400,19 +400,14 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct 
dmub_srv *dmub,
if (!dmub->hw_init || !dmub->hw_funcs.is_phy_init)
return DMUB_STATUS_INVALID;
 
-/* for (i = 0; i <= timeout_us; i += 10) {
+   for (i = 0; i <= timeout_us; i += 10) {
if (dmub->hw_funcs.is_phy_init(dmub))
return DMUB_STATUS_OK;
 
udelay(10);
-   }*/
-   while (!dmub->hw_funcs.is_phy_init(dmub)) {
-   ASSERT(i <= timeout_us);
-   i += 10;
-   udelay(10);
}
 
-   return DMUB_STATUS_OK;
+   return DMUB_STATUS_TIMEOUT;
 }
 
 enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 12/30] drm/amd/display: Add debugfs initialization on mst connectors

2019-11-11 Thread Rodrigo Siqueira
From: Mikita Lipski 

[why]
We were missing debugfs files on MST connectors as the files
weren't initialized.

[how]
Move connector debugfs initialization into connector's
init helper function so it will be called by both SST and MST
connectors. Also move connector registration so it will be
registered before we create the entries.

Signed-off-by: Mikita Lipski 
Reviewed-by: Nicholas Kazlauskas 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5573c5d9b328..05e0195d0005 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5661,6 +5661,12 @@ void amdgpu_dm_connector_init_helper(struct 
amdgpu_display_manager *dm,
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.freesync_capable_property, 0);
}
+
+#if defined(CONFIG_DEBUG_FS)
+   connector_debugfs_init(aconnector);
+   aconnector->debugfs_dpcd_address = 0;
+   aconnector->debugfs_dpcd_size = 0;
+#endif
 }
 
 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -5783,6 +5789,8 @@ static int amdgpu_dm_connector_init(struct 
amdgpu_display_manager *dm,
&aconnector->base,
&amdgpu_dm_connector_helper_funcs);
 
+   drm_connector_register(&aconnector->base);
+
amdgpu_dm_connector_init_helper(
dm,
aconnector,
@@ -5793,13 +5801,6 @@ static int amdgpu_dm_connector_init(struct 
amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
 
-   drm_connector_register(&aconnector->base);
-#if defined(CONFIG_DEBUG_FS)
-   connector_debugfs_init(aconnector);
-   aconnector->debugfs_dpcd_address = 0;
-   aconnector->debugfs_dpcd_size = 0;
-#endif
-
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector);
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 00/30] DC Patches 11 Nov 2019

2019-11-11 Thread Rodrigo Siqueira
This DC patchset brings improvements in multiple areas. In summary, we
have:

* Some adjustments in Renoir register
* Enhancements in DMUB
* Some code cleanup
* Improvements and fixes in debugfs

Alvin Lee (1):
  drm/amd/display: Changes in dc to allow full update in some cases

Anthony Koo (3):
  drm/amd/display: Clean up some code with unused registers
  drm/amd/display: cleanup of construct and destruct funcs
  drm/amd/display: cleanup of function pointer tables

Aric Cyr (2):
  drm/amd/display: 3.2.59
  drm/amd/display: 3.2.60

David (Dingchen) Zhang (1):
  drm/amd/display: add debugfs sdp hook up function for Navi

Hugo Hu (1):
  drm/amd/display: Update background color in bottommost mpcc

Ilya Bakoulin (1):
  drm/amd/display: Add DSC 422Native debug option

Jaehyun Chung (1):
  drm/amd/display: DML Validation Dump/Check with Logging

Joseph Gravenor (2):
  drm/amd/display: Renoir chroma viewport WA change formula
  drm/amd/display: Renoir chroma viewport WA Read the correct register

Joshua Aberback (1):
  drm/amd/display: Adjust DML workaround threshold

Leo (Hanghong) Ma (1):
  drm/amd/display: Add hubp clock status in DTN log for Navi

Michael Strauss (1):
  drm/amd/display: Avoid conflict between HDR multiplier and 3dlut

Mikita Lipski (2):
  drm/amd/display: Add debugfs initialization on mst connectors
  drm/amd/display: Fix debugfs on MST connectors

Nicholas Kazlauskas (5):
  drm/amd/display: Add DMUB service function check if hw initialized
  drm/amd/display: Add DMUB param to load inst const from driver
  drm/amd/display: Don't spin forever waiting for DMCUB phy/auto init
  drm/amd/display: Spin for DMCUB PHY init in DC
  drm/amd/display: Add Navi10 DMUB VBIOS code

Nikola Cornij (2):
  drm/amd/display: Connect DIG FE to its BE before link training starts
  drm/amd/display: Use a temporary copy of the current state when
updating DSC config

Samson Tam (2):
  drm/amd/display: Fix stereo with DCC enabled
  drm/amd/display: revert change causing DTN hang for RV

Stylon Wang (1):
  drm/amd/display: Fix incorrect deep color setting in YCBCR420 modes

Wenjing Liu (1):
  drm/amd/display: add color space option when sending link test pattern

Yongqiang Sun (1):
  drm/amd/display: Add debug trace for dmcub FW autoload.

abdoulaye berthe (1):
  drm/amd/display: add automated audio test support

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  31 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |   1 +
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   |  10 +-
 .../gpu/drm/amd/display/dc/basics/Makefile|   2 +-
 .../gpu/drm/amd/display/dc/basics/dc_common.c | 101 
 .../gpu/drm/amd/display/dc/basics/dc_common.h |  42 ++
 .../gpu/drm/amd/display/dc/bios/bios_parser.c |   4 +-
 .../drm/amd/display/dc/bios/bios_parser2.c|   8 +-
 drivers/gpu/drm/amd/display/dc/core/dc.c  |  80 +++-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  14 +-
 .../gpu/drm/amd/display/dc/core/dc_link_ddc.c |   8 +-
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 131 -
 drivers/gpu/drm/amd/display/dc/core/dc_sink.c |   8 +-
 .../gpu/drm/amd/display/dc/core/dc_stream.c   |  11 +-
 .../gpu/drm/amd/display/dc/core/dc_surface.c  |   8 +-
 drivers/gpu/drm/amd/display/dc/dc.h   |  13 +-
 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c  |  23 +-
 drivers/gpu/drm/amd/display/dc/dc_dp_types.h  |  58 ++-
 drivers/gpu/drm/amd/display/dc/dc_link.h  |   3 +
 .../gpu/drm/amd/display/dc/dce/dce_hwseq.h|   3 +-
 .../amd/display/dc/dce100/dce100_resource.c   |  10 +-
 .../display/dc/dce110/dce110_hw_sequencer.c   |  12 +-
 .../display/dc/dce110/dce110_hw_sequencer.h   |   1 -
 .../amd/display/dc/dce110/dce110_resource.c   |  10 +-
 .../amd/display/dc/dce112/dce112_resource.c   |  10 +-
 .../amd/display/dc/dce120/dce120_resource.c   |  10 +-
 .../drm/amd/display/dc/dce80/dce80_resource.c |  10 +-
 drivers/gpu/drm/amd/display/dc/dcn10/Makefile |   3 +-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 446 ++
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.h | 181 +--
 .../dc/dcn10/dcn10_hw_sequencer_debug.h   |  43 ++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 105 +
 .../gpu/drm/amd/display/dc/dcn10/dcn10_init.h |  33 ++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c  |  19 +-
 .../drm/amd/display/dc/dcn10/dcn10_resource.c |  12 +-
 drivers/gpu/drm/amd/display/dc/dcn20/Makefile |   2 +-
 .../gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 313 
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c| 445 +++--
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.h| 150 +++---
 .../gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 127 +
 .../gpu/drm/amd/display/dc/dcn20/dcn20_init.h |  33 ++
 .../gpu/drm/amd/display/dc/dcn20/dcn20_opp.c  |  16 +-
 .../gpu/drm/amd/display/dc/dcn20/dcn20_opp.h  |   1 +
 .../gpu/drm/amd/display/dc/dcn20/dcn20_optc.c |  12 +-
 .../gpu/drm/amd/display/dc/dcn20/dcn20_optc.h |   2 +-
 .../drm/amd/display/dc/dcn20/dcn20_resource.c |  36 +-
 .../drm/amd/displa

[PATCH 20/30] drm/amd/display: Add debug trace for dmcub FW autoload.

2019-11-11 Thread Rodrigo Siqueira
From: Yongqiang Sun 

[Why & How]
1. Add trace code enum for easy debugging.
2. Add trace during uC boot up, including loading phy FW
   and dmcu FW.
3. Change cache memory type back to write back,
   since write through has issue when resume from S0i3 100% hang after
   3.2ms.
4. Change CW3 base address to hard code value to avoid memory overlap
   with cw1.
5. Change polling phy init done to infinite loop to avoid dcn hang when
   dmcub uC stalled.
6. Add dmcub FW disassembly file to repository for debug purpose.

Signed-off-by: Yongqiang Sun 
Reviewed-by: Tony Cheng 
Acked-by: Rodrigo Siqueira 
---
 .../amd/display/dmub/inc/dmub_trace_buffer.h  | 21 +--
 .../gpu/drm/amd/display/dmub/src/dmub_dcn20.c |  2 +-
 .../gpu/drm/amd/display/dmub/src/dmub_srv.c   | 20 +++---
 3 files changed, 33 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
index 9707706ba8ce..b0ee099d8a6e 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
@@ -30,8 +30,25 @@
 #define LOAD_DMCU_FW   1
 #define LOAD_PHY_FW2
 
+
+enum dmucb_trace_code {
+   DMCUB__UNKNOWN,
+   DMCUB__MAIN_BEGIN,
+   DMCUB__PHY_INIT_BEGIN,
+   DMCUB__PHY_FW_SRAM_LOAD_BEGIN,
+   DMCUB__PHY_FW_SRAM_LOAD_END,
+   DMCUB__PHY_INIT_POLL_DONE,
+   DMCUB__PHY_INIT_END,
+   DMCUB__DMCU_ERAM_LOAD_BEGIN,
+   DMCUB__DMCU_ERAM_LOAD_END,
+   DMCUB__DMCU_ISR_LOAD_BEGIN,
+   DMCUB__DMCU_ISR_LOAD_END,
+   DMCUB__MAIN_IDLE,
+   DMCUB__PERF_TRACE,
+};
+
 struct dmcub_trace_buf_entry {
-   uint32_t trace_code;
+   enum dmucb_trace_code trace_code;
uint32_t tick_count;
uint32_t param0;
uint32_t param1;
@@ -40,6 +57,7 @@ struct dmcub_trace_buf_entry {
 #define TRACE_BUF_SIZE (1024) //1 kB
 #define PERF_TRACE_MAX_ENTRY ((TRACE_BUF_SIZE - 8)/sizeof(struct 
dmcub_trace_buf_entry))
 
+
 struct dmcub_trace_buf {
uint32_t entry_count;
uint32_t clk_freq;
@@ -47,5 +65,4 @@ struct dmcub_trace_buf {
 };
 
 
-
 #endif /* _DMUB_TRACE_BUFFER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 89fd27758dd5..e2b2cf2e01fd 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -138,5 +138,5 @@ bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
 
 bool dmub_dcn20_is_phy_init(struct dmub_srv *dmub)
 {
-   return REG_READ(DMCUB_SCRATCH10) != 0;
+   return REG_READ(DMCUB_SCRATCH10) == 0;
 }
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 0dd32edbbcb3..5ae1906ff1b1 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -26,6 +26,8 @@
 #include "../inc/dmub_srv.h"
 #include "dmub_dcn20.h"
 #include "dmub_dcn21.h"
+#include "dmub_trace_buffer.h"
+#include "os_types.h"
 /*
  * Note: the DMUB service is standalone. No additional headers should be
  * added below or above this line unless they reside within the DMUB
@@ -44,8 +46,6 @@
 /* Mailbox size */
 #define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE)
 
-/* Tracebuffer size */
-#define DMUB_TRACEBUFF_SIZE (1024) //1kB buffer
 
 /* Number of windows in use. */
 #define DMUB_NUM_WINDOWS (DMUB_WINDOW_5_TRACEBUFF + 1)
@@ -53,6 +53,7 @@
 
 #define DMUB_CW0_BASE (0x6000)
 #define DMUB_CW1_BASE (0x6100)
+#define DMUB_CW3_BASE (0x6300)
 #define DMUB_CW5_BASE (0x6500)
 
 static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
@@ -181,7 +182,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
mail->top = mail->base + DMUB_MAILBOX_SIZE;
 
trace_buff->base = dmub_align(mail->top, 256);
-   trace_buff->top = trace_buff->base + DMUB_TRACEBUFF_SIZE;
+   trace_buff->top = trace_buff->base + TRACE_BUF_SIZE;
 
out->fb_size = dmub_align(trace_buff->top, 4096);
 
@@ -291,7 +292,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw2.region.top = cw2.region.base + data_fb->size;
 
cw3.offset.quad_part = bios_fb->gpu_addr;
-   cw3.region.base = DMUB_CW1_BASE + stack_fb->size;
+   cw3.region.base = DMUB_CW3_BASE;
cw3.region.top = cw3.region.base + bios_fb->size;
 
cw4.offset.quad_part = mail_fb->gpu_addr;
@@ -394,19 +395,24 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct 
dmub_srv *dmub,
 enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
uint32_t timeout_us)
 {
-   uint32_t i;
+   uint32_t i = 0;
 
if (!dmub->hw_init || !dmub->hw_funcs.is_phy_init)
return DMUB_STATUS_INVALID;
 
-   for (i = 0; i <= timeout_us; i += 10) {
+/* for

[PATCH 06/30] drm/amd/display: Fix incorrect deep color setting in YCBCR420 modes

2019-11-11 Thread Rodrigo Siqueira
From: Stylon Wang 

[Why]
HDMI 2.0 HF-VSDB in EDID defines supported color depths in YCBCR420 modes.
But we did not honor these bit masks when choosing pixel encoding.
HDMI 2.0 compliance tests with deep color and YCBCR420 failed as a result.

[How]
Cap color depth based on y420_dc_modes from EDID.

Signed-off-by: Stylon Wang 
Reviewed-by: Nicholas Kazlauskas 
Acked-by: Rodrigo Siqueira 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31 +++
 1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ed7cad4a182d..5573c5d9b328 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3611,12 +3611,26 @@ static void update_stream_scaling_settings(const struct 
drm_display_mode *mode,
 
 static enum dc_color_depth
 convert_color_depth_from_display_info(const struct drm_connector *connector,
- const struct drm_connector_state *state)
+ const struct drm_connector_state *state,
+ bool is_y420)
 {
-   uint8_t bpc = (uint8_t)connector->display_info.bpc;
+   uint8_t bpc;
 
-   /* Assume 8 bpc by default if no bpc is specified. */
-   bpc = bpc ? bpc : 8;
+   if (is_y420) {
+   bpc = 8;
+
+   /* Cap display bpc based on HDMI 2.0 HF-VSDB */
+   if (connector->display_info.hdmi.y420_dc_modes & 
DRM_EDID_YCBCR420_DC_48)
+   bpc = 16;
+   else if (connector->display_info.hdmi.y420_dc_modes & 
DRM_EDID_YCBCR420_DC_36)
+   bpc = 12;
+   else if (connector->display_info.hdmi.y420_dc_modes & 
DRM_EDID_YCBCR420_DC_30)
+   bpc = 10;
+   } else {
+   bpc = (uint8_t)connector->display_info.bpc;
+   /* Assume 8 bpc by default if no bpc is specified. */
+   bpc = bpc ? bpc : 8;
+   }
 
if (!state)
state = connector->state;
@@ -3787,7 +3801,8 @@ static void fill_stream_properties_from_drm_display_mode(
 
timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
timing_out->display_color_depth = convert_color_depth_from_display_info(
-   connector, connector_state);
+   connector, connector_state,
+   (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
 
@@ -4926,6 +4941,7 @@ static int dm_encoder_helper_atomic_check(struct 
drm_encoder *encoder,
struct drm_dp_mst_port *mst_port;
enum dc_color_depth color_depth;
int clock, bpp = 0;
+   bool is_y420 = false;
 
if (!aconnector->port || !aconnector->dc_sink)
return 0;
@@ -4937,7 +4953,10 @@ static int dm_encoder_helper_atomic_check(struct 
drm_encoder *encoder,
return 0;
 
if (!state->duplicated) {
-   color_depth = convert_color_depth_from_display_info(connector, 
conn_state);
+   is_y420 = drm_mode_is_420_also(&connector->display_info, 
adjusted_mode) &&
+   aconnector->force_yuv420_output;
+   color_depth = convert_color_depth_from_display_info(connector, 
conn_state,
+   is_y420);
bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
clock = adjusted_mode->clock;
dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp);
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 03/30] drm/amd/display: Renoir chroma viewport WA Read the correct register

2019-11-11 Thread Rodrigo Siqueira
From: Joseph Gravenor 

[why]
Before we were reading registers specific to luma size, which caused a black 
line
to appear on the screen from time to time, as although the luma row height
is generally the same as the chroma row height for the video case, it will 
sometimes
be one more

[how]
Read the register specific for the chroma size

Signed-off-by: Joseph Gravenor 
Reviewed-by: Tony Cheng 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index d86b6b6211bc..32e8b589aeb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -200,8 +200,8 @@ void hubp21_set_viewport(
int pte_row_height = 0;
int pte_rows = 0;
 
-   REG_GET(DCHUBP_REQ_SIZE_CONFIG,
-   PTE_ROW_HEIGHT_LINEAR, &pte_row_height);
+   REG_GET(DCHUBP_REQ_SIZE_CONFIG_C,
+   PTE_ROW_HEIGHT_LINEAR_C, &pte_row_height);
 
pte_row_height = 1 << (pte_row_height + 3);
pte_rows = (viewport_c->height / pte_row_height) + 1;
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 22/30] drm/amd/display: add debugfs sdp hook up function for Navi

2019-11-11 Thread Rodrigo Siqueira
From: "David (Dingchen) Zhang" 

[why]
need to send immediate SDP message via debugfs on Navi board.

[how]
hook up the DCN1x encoder function of sending immediate sdp
message to DCN2.

Signed-off-by: David (Dingchen) Zhang 
Reviewed-by: Harry Wentland 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 33cc40fb9687..be0978401476 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -561,6 +561,8 @@ static const struct stream_encoder_funcs 
dcn20_str_enc_funcs = {
enc2_stream_encoder_stop_hdmi_info_packets,
.update_dp_info_packets =
enc2_stream_encoder_update_dp_info_packets,
+   .send_immediate_sdp_message =
+   enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 27/30] drm/amd/display: Spin for DMCUB PHY init in DC

2019-11-11 Thread Rodrigo Siqueira
From: Nicholas Kazlauskas 

[Why]
DCN will hang if we access registers before PHY init is done.

So we need to spin or abort.

[How]
On hardware with DMCUB running and working we shouldn't time out
waiting for this to finish and we shouldn't hit the spin cycle.

If there's no hardware support then we should exit out of the function
early assuming that PHY init was already done elsewhere.

If we hit the timeout then there's likely a bug in firmware or software
and we need to debug - add errors and asserts as appropriate.

Signed-off-by: Nicholas Kazlauskas 
Reviewed-by: Tony Cheng 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 21 
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c 
b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 74ffe53eb49d..03e2842cb573 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -112,10 +112,23 @@ void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv 
*dc_dmub_srv)
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
enum dmub_status status;
 
-   status = dmub_srv_wait_for_phy_init(dmub, 1000);
-   if (status != DMUB_STATUS_OK) {
-   DC_ERROR("Error waiting for DMUB phy init: status=%d\n",
-status);
+   for (;;) {
+   /* Wait up to a second for PHY init. */
+   status = dmub_srv_wait_for_phy_init(dmub, 100);
+   if (status == DMUB_STATUS_OK)
+   /* Initialization OK */
+   break;
+
+   DC_ERROR("DMCUB PHY init failed: status=%d\n", status);
ASSERT(0);
+
+   if (status != DMUB_STATUS_TIMEOUT)
+   /*
+* Server likely initialized or we don't have
+* DMCUB HW support - this won't end.
+*/
+   break;
+
+   /* Continue spinning so we don't hang the ASIC. */
}
 }
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 04/30] drm/amd/display: Add hubp clock status in DTN log for Navi

2019-11-11 Thread Rodrigo Siqueira
From: "Leo (Hanghong) Ma" 

[Why]
For debug purpose, we need to check HUBP_CLOCK_ENABLE in DTN
log debugfs on Navi.

[How]
Add related register read in dcn20_hubp.c.

Signed-off-by: Leo (Hanghong) Ma 
Reviewed-by: Harry Wentland 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index f04325604f6c..7d9ffb81584a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1202,6 +1202,9 @@ void hubp2_read_state_common(struct hubp *hubp)
HUBP_TTU_DISABLE, &s->ttu_disable,
HUBP_UNDERFLOW_STATUS, &s->underflow_status);
 
+   REG_GET(HUBP_CLK_CNTL,
+   HUBP_CLOCK_ENABLE, &s->clock_en);
+
REG_GET(DCN_GLOBAL_TTU_CNTL,
MIN_TTU_VBLANK, &s->min_ttu_vblank);
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 01/30] drm/amd/display: add automated audio test support

2019-11-11 Thread Rodrigo Siqueira
From: abdoulaye berthe 

Signed-off-by: abdoulaye berthe 
Reviewed-by: Wenjing Liu 
Acked-by: Rodrigo Siqueira 
---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 92 +++
 drivers/gpu/drm/amd/display/dc/dc_dp_types.h  | 48 --
 drivers/gpu/drm/amd/display/dc/dc_link.h  |  1 +
 3 files changed, 134 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 65de32fbcc83..68749c205fa0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2546,6 +2546,92 @@ static void dp_test_send_link_test_pattern(struct 
dc_link *link)
0);
 }
 
+static void dp_test_get_audio_test_data(struct dc_link *link, bool 
disable_video)
+{
+   union audio_test_mode dpcd_test_mode = {0};
+   struct audio_test_pattern_type   dpcd_pattern_type = {0};
+   union audio_test_pattern_period  
dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0};
+   enum dp_test_pattern test_pattern = 
DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+
+   struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+   struct pipe_ctx *pipe_ctx = &pipes[0];
+   unsigned int channel_count;
+   unsigned int channel = 0;
+   unsigned int modes = 0;
+   unsigned int sampling_rate_in_hz = 0;
+
+   // get audio test mode and test pattern parameters
+   core_link_read_dpcd(
+   link,
+   DP_TEST_AUDIO_MODE,
+   &dpcd_test_mode.raw,
+   sizeof(dpcd_test_mode));
+
+   core_link_read_dpcd(
+   link,
+   DP_TEST_AUDIO_PATTERN_TYPE,
+   &dpcd_pattern_type.value,
+   sizeof(dpcd_pattern_type));
+
+   channel_count = dpcd_test_mode.bits.channel_count + 1;
+
+   // read pattern periods for requested channels when sawTooth pattern is 
requested
+   if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
+   dpcd_pattern_type.value == 
AUDIO_TEST_PATTERN_OPERATOR_DEFINED) {
+
+   test_pattern = (dpcd_pattern_type.value == 
AUDIO_TEST_PATTERN_SAWTOOTH) ?
+   DP_TEST_PATTERN_AUDIO_SAWTOOTH : 
DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+   // read period for each channel
+   for (channel = 0; channel < channel_count; channel++) {
+   core_link_read_dpcd(
+   link,
+   
DP_TEST_AUDIO_PERIOD_CH1 + channel,
+   
&dpcd_pattern_period[channel].raw,
+   
sizeof(dpcd_pattern_period[channel]));
+   }
+   }
+
+   // translate sampling rate
+   switch (dpcd_test_mode.bits.sampling_rate) {
+   case AUDIO_SAMPLING_RATE_32KHZ:
+   sampling_rate_in_hz = 32000;
+   break;
+   case AUDIO_SAMPLING_RATE_44_1KHZ:
+   sampling_rate_in_hz = 44100;
+   break;
+   case AUDIO_SAMPLING_RATE_48KHZ:
+   sampling_rate_in_hz = 48000;
+   break;
+   case AUDIO_SAMPLING_RATE_88_2KHZ:
+   sampling_rate_in_hz = 88200;
+   break;
+   case AUDIO_SAMPLING_RATE_96KHZ:
+   sampling_rate_in_hz = 96000;
+   break;
+   case AUDIO_SAMPLING_RATE_176_4KHZ:
+   sampling_rate_in_hz = 176400;
+   break;
+   case AUDIO_SAMPLING_RATE_192KHZ:
+   sampling_rate_in_hz = 192000;
+   break;
+   default:
+   sampling_rate_in_hz = 0;
+   break;
+   }
+
+   link->audio_test_data.flags.test_requested = 1;
+   link->audio_test_data.flags.disable_video = disable_video;
+   link->audio_test_data.sampling_rate = sampling_rate_in_hz;
+   link->audio_test_data.channel_count = channel_count;
+   link->audio_test_data.pattern_type = test_pattern;
+
+   if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) {
+   for (modes = 0; modes < 
pipe_ctx->stream->audio_info.mode_count; modes++) {
+   link->audio_test_data.pattern_period[modes] = 
dpcd_pattern_period[modes].bits.pattern_period;
+   }
+   }
+}
+
 static void handle_automated_test(struct dc_link *link)
 {
union test_request test_request;
@@ -2575,6 +2661,12 @@ static void handle_automated_test(struct dc_link *link)
dp_test_send_link_test_pattern(link);
test_response.bits.ACK = 1;
}
+
+   if (test_request.bits.AUDIO_TEST_PATTERN) {
+   dp_test_get_audio_test_data(link, 
test_request.bits.TEST_AUDIO_DISABLED_VIDEO);
+   test_response.bits.ACK = 1;
+   }
+
if (test_request.bits.PHY_TEST_PATTERN) {
  

[PATCH 02/30] drm/amd/display: Renoir chroma viewport WA change formula

2019-11-11 Thread Rodrigo Siqueira
From: Joseph Gravenor 

[why]
we want to increase the pte row plus 1 line if chroma viewport
height is integer multiple of the pte row height

[how]
instead of ceiling viewport height, we floor it. this allows
us to accommodate both cases: those where the chroma viewport
height is integer multiple of the pte row height and those where
it is not

Signed-off-by: Joseph Gravenor 
Reviewed-by: Tony Cheng 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 1ddd6ae22155..d86b6b6211bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -204,8 +204,8 @@ void hubp21_set_viewport(
PTE_ROW_HEIGHT_LINEAR, &pte_row_height);
 
pte_row_height = 1 << (pte_row_height + 3);
-   pte_rows = (viewport_c->height + pte_row_height - 1) / 
pte_row_height;
-   patched_viewport_height = pte_rows * pte_row_height + 3;
+   pte_rows = (viewport_c->height / pte_row_height) + 1;
+   patched_viewport_height = pte_rows * pte_row_height + 1;
}
 
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 09/30] drm/amd/display: Changes in dc to allow full update in some cases

2019-11-11 Thread Rodrigo Siqueira
From: Alvin Lee 

Changes in dc to allow for different cases where full update is
required.

Signed-off-by: Alvin Lee 
Reviewed-by: Jun Lei 
Acked-by: Rodrigo Siqueira 
---
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 22 +++
 .../drm/amd/display/dc/dcn20/dcn20_resource.h |  2 +-
 .../drm/amd/display/dc/dcn21/dcn21_resource.c | 11 +-
 .../gpu/drm/amd/display/dc/inc/core_types.h   |  2 +-
 4 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 3d5a79ff1151..f69b45eeb766 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1773,10 +1773,11 @@ void dcn20_populate_dml_writeback_from_context(
 }
 
 int dcn20_populate_dml_pipes_from_context(
-   struct dc *dc, struct resource_context *res_ctx, 
display_e2e_pipe_params_st *pipes)
+   struct dc *dc, struct dc_state *context, 
display_e2e_pipe_params_st *pipes)
 {
int pipe_cnt, i;
bool synchronized_vblank = true;
+   struct resource_context *res_ctx = &context->res_ctx;
 
for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@@ -1796,10 +1797,13 @@ int dcn20_populate_dml_pipes_from_context(
 
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing = 
&res_ctx->pipe_ctx[i].stream->timing;
+   unsigned int v_total;
int output_bpc;
 
if (!res_ctx->pipe_ctx[i].stream)
continue;
+
+   v_total = timing->v_total;
/* todo:
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
pipes[pipe_cnt].pipe.src.dcc = 0;
@@ -1812,7 +1816,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
/* 1/2 vblank */

pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
-   (timing->v_total - timing->v_addressable
+   (v_total - timing->v_addressable
- timing->v_border_top - 
timing->v_border_bottom) / 2;
/* 36 bytes dp, 32 hdmi */
pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
@@ -1826,13 +1830,13 @@ int dcn20_populate_dml_pipes_from_context(
- timing->h_addressable
- timing->h_border_left
- timing->h_border_right;
-   pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - 
timing->v_front_porch;
+   pipes[pipe_cnt].pipe.dest.vblank_start = v_total - 
timing->v_front_porch;
pipes[pipe_cnt].pipe.dest.vblank_end = 
pipes[pipe_cnt].pipe.dest.vblank_start
- timing->v_addressable
- timing->v_border_top
- timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
-   pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+   pipes[pipe_cnt].pipe.dest.vtotal = v_total;
pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
@@ -1967,8 +1971,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
pipes[pipe_cnt].pipe.src.is_hsplit = 0;
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
-   pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
-   pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
+   pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
+   pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
} else {
struct dc_plane_state *pln = 
res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = 
&res_ctx->pipe_ctx[i].plane_res.scl_data;
@@ -2430,7 +2434,7 @@ bool dcn20_fast_validate_bw(
 
dcn20_merge_pipes_for_validate(dc, context);
 
-   pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, 
&context->res_ctx, pipes);
+   pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);
 
*pipe_cnt_out = pipe_cnt;
 
@@ -2576,10 +2580,10 @@ static void dcn20_calculate_wm(
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
-

[PATCH 15/30] drm/amd/display: revert change causing DTN hang for RV

2019-11-11 Thread Rodrigo Siqueira
From: Samson Tam 

[Why]
Hanging on RV for DTN driver verifier

[How]
Roll back change and investigate further

Signed-off-by: Samson Tam 
Reviewed-by: Jun Lei 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 27 
 1 file changed, 27 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index ffc8b1f89690..1fdba13b3d0f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1748,37 +1748,12 @@ static struct dc_stream_status *stream_get_status(
 
 static const enum surface_update_type update_surface_trace_level = 
UPDATE_TYPE_FULL;
 
-static void validate_dcc_with_meta_address(
-   struct dc_plane_dcc_param *dcc,
-   struct dc_plane_address *address)
-{
-   if ((address->grph.meta_addr.quad_part == 0) &&
-   dcc->enable) {
-   ASSERT(!dcc->enable);
-   dcc->enable = false;
-   } else if ((address->grph.meta_addr.quad_part != 0) &&
-   !dcc->enable)
-   dcc->enable = true;
-
-   if (address->type != PLN_ADDR_TYPE_GRAPHICS) {
-   if ((address->grph_stereo.right_meta_addr.quad_part == 0) &&
-   dcc->enable) {
-   ASSERT(!dcc->enable);
-   dcc->enable = false;
-   } else if ((address->grph_stereo.right_meta_addr.quad_part != 
0) &&
-   !dcc->enable)
-   dcc->enable = true;
-   }
-}
-
 static void copy_surface_update_to_plane(
struct dc_plane_state *surface,
struct dc_surface_update *srf_update)
 {
if (srf_update->flip_addr) {
surface->address = srf_update->flip_addr->address;
-   validate_dcc_with_meta_address(&surface->dcc, 
&surface->address);
-
surface->flip_immediate =
srf_update->flip_addr->flip_immediate;
surface->time.time_elapsed_in_us[surface->time.index] =
@@ -1827,8 +1802,6 @@ static void copy_surface_update_to_plane(
srf_update->plane_info->global_alpha_value;
surface->dcc =
srf_update->plane_info->dcc;
-   validate_dcc_with_meta_address(&surface->dcc, 
&surface->address);
-
surface->sdr_white_level =
srf_update->plane_info->sdr_white_level;
surface->layer_index =
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 07/30] drm/amd/display: 3.2.59

2019-11-11 Thread Rodrigo Siqueira
From: Aric Cyr 

Signed-off-by: Aric Cyr 
Reviewed-by: Aric Cyr 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 2e6b3ecd564d..4c6c2fcc6a96 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.58"
+#define DC_VER "3.2.59"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 16/30] drm/amd/display: Fix debugfs on MST connectors

2019-11-11 Thread Rodrigo Siqueira
From: Mikita Lipski 

[why]
Previous patch allowed to initialize debugfs entries on both MST
and SST connectors, but MST connectors get registered much later
which exposed an issue of debugfs entries being initialized in the
same folder.

[how]
Return SST debugfs entries' initialization back to where it was.
For MST connectors we should initialize debugfs entries in connector
register function after the connector is registered.

Signed-off-by: Mikita Lipski 
Reviewed-by: Nicholas Kazlauskas 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 15 +++
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c   | 10 +-
 2 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 05e0195d0005..5573c5d9b328 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5661,12 +5661,6 @@ void amdgpu_dm_connector_init_helper(struct 
amdgpu_display_manager *dm,
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.freesync_capable_property, 0);
}
-
-#if defined(CONFIG_DEBUG_FS)
-   connector_debugfs_init(aconnector);
-   aconnector->debugfs_dpcd_address = 0;
-   aconnector->debugfs_dpcd_size = 0;
-#endif
 }
 
 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -5789,8 +5783,6 @@ static int amdgpu_dm_connector_init(struct 
amdgpu_display_manager *dm,
&aconnector->base,
&amdgpu_dm_connector_helper_funcs);
 
-   drm_connector_register(&aconnector->base);
-
amdgpu_dm_connector_init_helper(
dm,
aconnector,
@@ -5801,6 +5793,13 @@ static int amdgpu_dm_connector_init(struct 
amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
 
+   drm_connector_register(&aconnector->base);
+#if defined(CONFIG_DEBUG_FS)
+   connector_debugfs_init(aconnector);
+   aconnector->debugfs_dpcd_address = 0;
+   aconnector->debugfs_dpcd_size = 0;
+#endif
+
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 1a17ea1b42e0..2a05382d548f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,7 +36,9 @@
 #include "dc_link_ddc.h"
 
 #include "i2caux_interface.h"
-
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
 /* #define TRACE_DPCD */
 
 #ifdef TRACE_DPCD
@@ -162,6 +164,12 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector 
*connector)
to_amdgpu_dm_connector(connector);
struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
 
+#if defined(CONFIG_DEBUG_FS)
+   connector_debugfs_init(amdgpu_dm_connector);
+   amdgpu_dm_connector->debugfs_dpcd_address = 0;
+   amdgpu_dm_connector->debugfs_dpcd_size = 0;
+#endif
+
return drm_dp_mst_connector_late_register(connector, port);
 }
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 05/30] drm/amd/display: Update background color in bottommost mpcc

2019-11-11 Thread Rodrigo Siqueira
From: Hugo Hu 

[Why]
Background color only takes effect in bottommost mpcc.

[How]
Update background color in bottommost mpcc.

Signed-off-by: Hugo Hu 
Reviewed-by: Yongqiang Sun 
Acked-by: Rodrigo Siqueira 
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c  | 19 +--
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 220154f7911a..04f863499cfb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -42,20 +42,27 @@ void mpc1_set_bg_color(struct mpc *mpc,
int mpcc_id)
 {
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+   struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+   uint32_t bg_r_cr, bg_g_y, bg_b_cb;
+
+   /* find bottommost mpcc. */
+   while (bottommost_mpcc->mpcc_bot) {
+   bottommost_mpcc = bottommost_mpcc->mpcc_bot;
+   }
 
/* mpc color is 12 bit.  tg_color is 10 bit */
/* todo: might want to use 16 bit to represent color and have each
 * hw block translate to correct color depth.
 */
-   uint32_t bg_r_cr = bg_color->color_r_cr << 2;
-   uint32_t bg_g_y = bg_color->color_g_y << 2;
-   uint32_t bg_b_cb = bg_color->color_b_cb << 2;
+   bg_r_cr = bg_color->color_r_cr << 2;
+   bg_g_y = bg_color->color_g_y << 2;
+   bg_b_cb = bg_color->color_b_cb << 2;
 
-   REG_SET(MPCC_BG_R_CR[mpcc_id], 0,
+   REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_R_CR, bg_r_cr);
-   REG_SET(MPCC_BG_G_Y[mpcc_id], 0,
+   REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_G_Y, bg_g_y);
-   REG_SET(MPCC_BG_B_CB[mpcc_id], 0,
+   REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_B_CB, bg_b_cb);
 }
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 14/30] drm/amd/display: Clean up some code with unused registers

2019-11-11 Thread Rodrigo Siqueira
From: Anthony Koo 

[Why]
Unused register in the code

[How]
Remove unused register

Signed-off-by: Anthony Koo 
Reviewed-by: Tony Cheng 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h 
b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index ebe8f9a21be2..bff03a68aa01 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -792,8 +792,7 @@ struct dce_hwseq_registers {
type D2VGA_MODE_ENABLE; \
type D3VGA_MODE_ENABLE; \
type D4VGA_MODE_ENABLE; \
-   type AZALIA_AUDIO_DTO_MODULE;\
-   type HPO_HDMISTREAMCLK_GATE_DIS;
+   type AZALIA_AUDIO_DTO_MODULE;
 
 struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 10/30] drm/amd/display: Add DMUB service function check if hw initialized

2019-11-11 Thread Rodrigo Siqueira
From: Nicholas Kazlauskas 

[Why]
We want to avoid reprogramming the cache window when possible.

We don't need to worry about it for S3 but we *do* need to worry about
it for S4 resume.

DM can check whether hardware should be reinitialized or store software
state when going to S4 to know whether we need to reprogram hardware.

[How]
Add helpers to the DMUB service to check hardware initialization state.

DM will hook it up later.

Signed-off-by: Nicholas Kazlauskas 
Reviewed-by: Tony Cheng 
Acked-by: Rodrigo Siqueira 
---
 drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h   | 11 +++
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c |  5 +
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h |  2 ++
 drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c   | 14 ++
 4 files changed, 32 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index aa8f0396616d..76e80138303b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -252,6 +252,8 @@ struct dmub_srv_hw_funcs {
 
bool (*is_supported)(struct dmub_srv *dmub);
 
+   bool (*is_hw_init)(struct dmub_srv *dmub);
+
bool (*is_phy_init)(struct dmub_srv *dmub);
 
bool (*is_auto_load_done)(struct dmub_srv *dmub);
@@ -380,6 +382,15 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv 
*dmub,
 enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
 bool *is_supported);
 
+/**
+ * dmub_srv_is_hw_init() - returns hardware init state
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init);
+
 /**
  * dmub_srv_hw_init() - initializes the underlying DMUB hardware
  * @dmub: the dmub service
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 236a4156bbe1..89fd27758dd5 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -122,6 +122,11 @@ void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, 
uint32_t wptr_offset)
REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset);
 }
 
+bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub)
+{
+   return REG_READ(DMCUB_REGION3_CW2_BASE_ADDRESS) != 0;
+}
+
 bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
 {
uint32_t supported = 0;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 41269da40363..e1ba748ca594 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -55,6 +55,8 @@ uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub);
 
 void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
 
+bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
+
 bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
 
 bool dmub_dcn20_is_phy_init(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 229eab7277d1..2d63ae80bda9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -76,6 +76,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum 
dmub_asic asic)
funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
funcs->is_supported = dmub_dcn20_is_supported;
funcs->is_phy_init = dmub_dcn20_is_phy_init;
+   funcs->is_hw_init = dmub_dcn20_is_hw_init;
 
if (asic == DMUB_ASIC_DCN21) {
funcs->backdoor_load = dmub_dcn21_backdoor_load;
@@ -234,6 +235,19 @@ enum dmub_status dmub_srv_has_hw_support(struct dmub_srv 
*dmub,
return DMUB_STATUS_OK;
 }
 
+enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
+{
+   *is_hw_init = false;
+
+   if (!dmub->sw_init)
+   return DMUB_STATUS_INVALID;
+
+   if (dmub->hw_funcs.is_hw_init)
+   *is_hw_init = dmub->hw_funcs.is_hw_init(dmub);
+
+   return DMUB_STATUS_OK;
+}
+
 enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
  const struct dmub_srv_hw_params *params)
 {
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/powerplay: avoid DPM reenable process on Navi1x ASICs

2019-11-11 Thread Matt Coffin
Patch is Tested-by: Matt Coffin 

On 11/11/19 2:25 AM, Evan Quan wrote:
> Otherwise, without RLC reinitialization, the DPM reenablement
> will fail. That affects the custom pptable uploading.
> 
> Change-Id: I6fe2ed5ce23f2a5b66f371c0b6d1f924837e5af6
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 32 +++
>  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  1 +
>  2 files changed, 26 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 76a4154b3be2..a4d67b30fd72 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1293,10 +1293,25 @@ static int smu_hw_fini(void *handle)
>   return ret;
>   }
>  
> - ret = smu_stop_dpms(smu);
> - if (ret) {
> - pr_warn("Fail to stop Dpms!\n");
> - return ret;
> + /*
> +  * For custom pptable uploading, skip the DPM features
> +  * disable process on Navi1x ASICs.
> +  *   - As the gfx related features are under control of
> +  * RLC on those ASICs. RLC reinitialization will be
> +  * needed to reenable them. That will cost much more
> +  * efforts.
> +  *
> +  *   - SMU firmware can handle the DPM reenablement
> +  * properly.
> +  */
> + if (!smu->uploading_custom_pp_table ||
> + !((adev->asic_type >= CHIP_NAVI10) &&
> +   (adev->asic_type <= CHIP_NAVI12))) {
> + ret = smu_stop_dpms(smu);
> + if (ret) {
> + pr_warn("Fail to stop Dpms!\n");
> + return ret;
> + }
>   }
>  
>   kfree(table_context->driver_pptable);
> @@ -1324,13 +1339,16 @@ int smu_reset(struct smu_context *smu)
>   struct amdgpu_device *adev = smu->adev;
>   int ret = 0;
>  
> + smu->uploading_custom_pp_table = true;
> +
>   ret = smu_hw_fini(adev);
>   if (ret)
> - return ret;
> + goto out;
>  
>   ret = smu_hw_init(adev);
> - if (ret)
> - return ret;
> +
> +out:
> + smu->uploading_custom_pp_table = false;
>  
>   return ret;
>  }
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 8120e7587585..215841f5fb93 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -391,6 +391,7 @@ struct smu_context
>  
>   uint32_t smc_if_version;
>  
> + bool uploading_custom_pp_table;
>  };
>  
>  struct i2c_adapter;
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/powerplay: do proper cleanups on hw_fini

2019-11-11 Thread Matt Coffin
Thanks Evan. I can confirm that the linked patch resolves the issue for me.

I commented and resolved the bug as well in case other people find it.

Cheers,
Matt

On 11/11/19 2:28 AM, Quan, Evan wrote:
> Just sent out a patch which should be able to address this issue.
> https://lists.freedesktop.org/archives/amd-gfx/2019-November/042458.html
> 
> Regards,
> Evan
>> -Original Message-
>> From: Matt Coffin 
>> Sent: Saturday, November 9, 2019 4:50 AM
>> To: Quan, Evan ; amd-gfx@lists.freedesktop.org
>> Cc: Li, Candice ; Gui, Jack ; Alex
>> Deucher 
>> Subject: Re: [PATCH] drm/amd/powerplay: do proper cleanups on hw_fini
>>
>> Hey guys,
>>
>>
>>
>> This patch caused some kind of reversion with smu_reset on Navi10. I'm no
>> expert since everything I know comes from just reading through the code, so
>> this could be some kind of intended behavior, but after this patch, if you 
>> write a
>> pptable to the sysfs pp_table interface on navi10, then the SMU will fail to 
>> reset
>> successfully, and the result is seemingly an unrecoverable situation.
>>
>>
>>
>> I put in a report on bugzilla with dmesg logs
>> :
>> https://bugs.freedesktop.org/show_bug.cgi?id=112234
>>
>>
>> Finding this change was the result of a bisect to find where the issue 
>> started,
>> and reverting the changes to smu_hw_fini resolved the issue.
>> Any advice on possible proper fixes?
>>
>> Thanks in advance,
>>
>> Matt
>>
>> On 9/2/19 9:44 PM, Quan, Evan wrote:
>>> These are needed for smu_reset support.
>>>
>>> Change-Id: If29ede4b99758adb08fd4e16665f44fd893ec99b
>>> Signed-off-by: Evan Quan 
>>> ---
>>>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 17
>> +
>>>  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  3 +++
>>>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 10 ++
>>>  3 files changed, 30 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
>>> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
>>> index d5ee13a78eb7..3cf8d944f890 100644
>>> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
>>> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
>>> @@ -1286,6 +1286,11 @@ static int smu_hw_init(void *handle)
>>> return ret;
>>>  }
>>>
>>> +static int smu_stop_dpms(struct smu_context *smu) {
>>> +   return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures); }
>>> +
>>>  static int smu_hw_fini(void *handle)
>>>  {
>>> struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@
>>> -1298,6 +1303,18 @@ static int smu_hw_fini(void *handle)
>>> smu_powergate_vcn(&adev->smu, true);
>>> }
>>>
>>> +   ret = smu_stop_thermal_control(smu);
>>> +   if (ret) {
>>> +   pr_warn("Fail to stop thermal control!\n");
>>> +   return ret;
>>> +   }
>>> +
>>> +   ret = smu_stop_dpms(smu);
>>> +   if (ret) {
>>> +   pr_warn("Fail to stop Dpms!\n");
>>> +   return ret;
>>> +   }
>>> +
>>> kfree(table_context->driver_pptable);
>>> table_context->driver_pptable = NULL;
>>>
>>> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
>>> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
>>> index b19224cb6d6d..8e4b0ad24712 100644
>>> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
>>> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
>>> @@ -498,6 +498,7 @@ struct smu_funcs
>>> int (*get_current_clk_freq)(struct smu_context *smu, enum
>> smu_clk_type clk_id, uint32_t *value);
>>> int (*init_max_sustainable_clocks)(struct smu_context *smu);
>>> int (*start_thermal_control)(struct smu_context *smu);
>>> +   int (*stop_thermal_control)(struct smu_context *smu);
>>> int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors
>> sensor,
>>>void *data, uint32_t *size);
>>> int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t
>>> clk); @@ -647,6 +648,8 @@ struct smu_funcs
>>> ((smu)->ppt_funcs->set_thermal_fan_table ?
>>> (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)  #define
>> smu_start_thermal_control(smu) \
>>> ((smu)->funcs->start_thermal_control?
>>> (smu)->funcs->start_thermal_control((smu)) : 0)
>>> +#define smu_stop_thermal_control(smu) \
>>> +   ((smu)->funcs->stop_thermal_control?
>>> +(smu)->funcs->stop_thermal_control((smu)) : 0)
>>>  #define smu_read_sensor(smu, sensor, data, size) \
>>> ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs-
>>> read_sensor((smu),
>>> (sensor), (data), (size)) : 0)  #define smu_smc_read_sensor(smu,
>>> sensor, data, size) \ diff --git
>>> a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
>>> b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
>>> index db5e94ce54af..1a38af84394e 100644
>>> --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
>>> +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
>>> @@ -1209,6 +1209,15 @@ static int
>> smu_v11_0_start_thermal_control(struct smu_context *smu)
>>> return ret;
>>>  }
>>>
>>> +static int smu_v11_0_stop_thermal_control(struct smu_context *smu) {
>>> +   struct amdgpu_device *adev = s

[PATCH 3/3] drm/amdkfd: Fix a bug when calculating save_area_used_size

2019-11-11 Thread Yong Zhao
Workgroup context data is written starting from m->cp_hqd_cntl_stack_size,
so we should deduct it when calculating the used size.

Change-Id: I5252e25662c3b8221f451c39115bf084d1911eae
Signed-off-by: Yong Zhao 
---
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index d3380c5bdbde..3a2ee1f01aae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -302,7 +302,8 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
 
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
-   *save_area_used_size = m->cp_hqd_wg_state_offset;
+   *save_area_used_size = m->cp_hqd_wg_state_offset -
+   m->cp_hqd_cntl_stack_size;;
 
if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
return -EFAULT;
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 1/3] drm/amdkfd: Implement queue priority controls for gfx10

2019-11-11 Thread Yong Zhao
Ported from gfx9.

Change-Id: I388dc7c609ed724a6d600840f8e7317d9c2c877d
Signed-off-by: Yong Zhao 
---
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 4a236b2c2354..4884cd6c65ce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -66,6 +66,12 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se3);
 }
 
+static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
+{
+   m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
+   m->cp_hqd_queue_priority = q->priority;
+}
+
 static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
struct queue_properties *q)
 {
@@ -109,9 +115,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
 
-   m->cp_hqd_pipe_priority = 1;
-   m->cp_hqd_queue_priority = 15;
-
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
@@ -208,6 +211,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_ctx_save_control = 0;
 
update_cu_mask(mm, mqd, q);
+   set_priority(m, q);
 
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 2/3] drm/amdkfd: Update get_wave_state() for GFX10

2019-11-11 Thread Yong Zhao
Given control stack is now in the userspace context save restore area
on GFX10, the same as GFX8, it is not needed to copy it back to userspace.

Change-Id: I063ddc3026eefa57713ec47b466a90f9bf9d49b8
Signed-off-by: Yong Zhao 
---
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 4884cd6c65ce..954dc8ac4ff1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -251,18 +251,22 @@ static int get_wave_state(struct mqd_manager *mm, void 
*mqd,
 {
struct v10_compute_mqd *m;
 
-   /* Control stack is located one page after MQD. */
-   void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
-
m = get_mqd(mqd);
 
+   /* Control stack is written backwards, while workgroup context data
+* is written forwards. Both starts from m->cp_hqd_cntl_stack_size.
+* Current position is at m->cp_hqd_cntl_stack_offset and
+* m->cp_hqd_wg_state_offset, respectively.
+*/
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
*save_area_used_size = m->cp_hqd_wg_state_offset -
m->cp_hqd_cntl_stack_size;
 
-   if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
-   return -EFAULT;
+   /* Control stack is not copied to user mode for GFXv10 because
+* it's part of the context save area that is already
+* accessible to user mode
+*/
 
return 0;
 }
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 2/2] drm/amdkfd: Avoid using doorbell_off as offset in process doorbell pages

2019-11-11 Thread Yong Zhao
The NULL pointer is not an issue, because for DIQ, the if (q) condition, 
which guards the section but is not shown, will never be satisfied. 
Anyway, I still added the NULL pointer check.


With that, I have pushed the change.


Yong

On 2019-11-11 3:51 p.m., Felix Kuehling wrote:

On 2019-11-11 15:43, Felix Kuehling wrote:

On 2019-11-01 16:10, Zhao, Yong wrote:

dorbell_off in the queue properties is mainly used for the doorbell dw
offset in pci bar. We should not set it to the doorbell byte offset in
process doorbell pages. This makes the code much easier to read.


I kind of agree. I think what's confusing is that the 
queue_properties structure is used for two different purposes.


 1. For storing queue properties provided by user mode through KFD ioctls
 2. A subset of struct queue passed to mqd_manager and elsewhere
(that's why some driver state is creeping into it)

Maybe a follow-up could cleanly separate the queue properties from 
the queue driver state. That would probably change some internal 
interfaces to use struct queue instead of queue_properties.


Anyway, this patch is

Reviewed-by: Felix Kuehling 

I pointed out a missing NULL pointer check inline near the end of the 
patch. I should have mentioned it here. Please fix that before you submit.


Thanks,
  Felix



Change-Id: I553045ff9fcb3676900c92d10426f2ceb3660005
Signed-off-by: Yong Zhao
---
  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 12 ++--
  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c  |  2 +-
  drivers/gpu/drm/amd/amdkfd/kfd_priv.h|  3 ++-
  .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c   |  8 ++--
  4 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index d9e36dbf13d5..b91993753b82 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -258,6 +258,7 @@ static int kfd_ioctl_create_queue(struct file *filep, 
struct kfd_process *p,
unsigned int queue_id;
struct kfd_process_device *pdd;
struct queue_properties q_properties;
+   uint32_t doorbell_offset_in_process = 0;
  
  	memset(&q_properties, 0, sizeof(struct queue_properties));
  
@@ -286,7 +287,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,

p->pasid,
dev->id);
  
-	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);

+   err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
+   &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
  
@@ -298,12 +300,10 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,

args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
args->doorbell_offset <<= PAGE_SHIFT;
if (KFD_IS_SOC15(dev->device_info->asic_family))
-   /* On SOC15 ASICs, doorbell allocation must be
-* per-device, and independent from the per-process
-* queue_id. Return the doorbell offset within the
-* doorbell aperture to user mode.
+   /* On SOC15 ASICs, include the doorbell offset within the
+* process doorbell frame, which could be 1 page or 2 pages.
 */
-   args->doorbell_offset |= q_properties.doorbell_off;
+   args->doorbell_offset |= doorbell_offset_in_process;
  
  	mutex_unlock(&p->mutex);
  
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c

index d59f2cd056c6..1d33c4f25263 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -185,7 +185,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
properties.type = KFD_QUEUE_TYPE_DIQ;
  
  	status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,

-   &properties, &qid);
+   &properties, &qid, NULL);
  
  	if (status) {

pr_err("Failed to create DIQ\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 7c561c98f2e2..66bae8f2dad1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -907,7 +907,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
-   unsigned int *qid);
+   unsigned int *qid,
+   uint32_t *p_doorbell_offset_in_process);
  int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
  int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
di

Re: [PATCH] drm/amd/display: Fix unsigned variable compared to less than zero

2019-11-11 Thread Alex Deucher
Applied.  Thanks!

Alex

On Mon, Nov 11, 2019 at 2:44 PM Gustavo A. R. Silva
 wrote:
>
>
>
> On 11/11/19 11:46, Mikita Lipski wrote:
> >
> > Thanks for catching it!
> >
>
> Glad to help out. :)
>
> > Reviewed-by: Mikita Lipski 
> >
>
> Thanks
> --
> Gustavo
>
> >
> > On 11.11.2019 12:25, Gustavo A. R. Silva wrote:
> >> Currently, the error check below on variable *vcpi_slots* is always
> >> false because it is a uint64_t type variable, hence, the values
> >> this variable can hold are never less than zero:
> >>
> >> drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c:
> >> 4870 if (dm_new_connector_state->vcpi_slots < 0) {
> >> 4871 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", 
> >> (int)dm_new_connector_stat e->vcpi_slots);
> >> 4872 return dm_new_connector_state->vcpi_slots;
> >> 4873 }
> >>
> >> Fix this by making*vcpi_slots*  of int type
> >>
> >> Addresses-Coverity: 1487838 ("Unsigned compared against 0")
> >> Fixes: b4c578f08378 ("drm/amd/display: Add MST atomic routines")
> >> Signed-off-by: Gustavo A. R. Silva
> >> ---
> >>   drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +-
> >>   1 file changed, 1 insertion(+), 1 deletion(-)
> >>
> >> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
> >> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
> >> index 6db07e9e33ab..a8fc90a927d6 100644
> >> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
> >> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
> >> @@ -403,7 +403,7 @@ struct dm_connector_state {
> >>   bool underscan_enable;
> >>   bool freesync_capable;
> >>   uint8_t abm_level;
> >> -uint64_t vcpi_slots;
> >> +int vcpi_slots;
> >>   uint64_t pbn;
> >>   };
> >>   -- 2.23.0
> >
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/amdgpu/soc15: move struct definition around to align with other soc15 asics

2019-11-11 Thread Alex Deucher
Move reset_method next to reset callback to match the struct layout and
the other definition in this file.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8e1640bc07af..305ad3eec987 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1007,6 +1007,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.read_bios_from_rom = &soc15_read_bios_from_rom,
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
+   .reset_method = &soc15_asic_reset_method,
.set_vga_state = &soc15_vga_set_state,
.get_xclk = &soc15_get_xclk,
.set_uvd_clocks = &soc15_set_uvd_clocks,
@@ -1019,7 +1020,6 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.get_pcie_usage = &vega20_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
-   .reset_method = &soc15_asic_reset_method
 };
 
 static int soc15_common_early_init(void *handle)
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

2019-11-11 Thread Andrey Grodzovsky
Emily - is there a particular scenario to reproduce this ? I am trying 
with libdrm deadlock test and artificially delaying the GPU reset logic 
until after the guilty job is signaling but indeed nothing bad happens 
as drm_sched_cleanup_jobs returns early because there is a reset in 
progress and so the bad job is not getting released while GPU reset is 
running.


Can you provide event tracing for timer, dma_fence and gpu_scheduler for 
when the problem happens ?


Andrey

On 11/11/19 4:05 AM, Deng, Emily wrote:

Hi Christian and Andrey,
  The issue I encountered is that the bad job is being freed after entering
amdgpu_device_gpu_recover. I don't know why, since, as Christian said, it will call
cancel_delayed_work in drm_sched_cleanup_jobs.

Best wishes
Emily Deng




-Original Message-
From: amd-gfx  On Behalf Of Deng,
Emily
Sent: Monday, November 11, 2019 3:19 PM
To: Grodzovsky, Andrey ; Koenig, Christian
; amd-gfx@lists.freedesktop.org
Subject: RE: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

Hi Andrey,
I don't think your patch will help for this, as it may call
kthread_should_park in drm_sched_cleanup_jobs first, and then call
kcl_kthread_park. There would still be a race between the two threads.

Best wishes
Emily Deng




-Original Message-
From: Grodzovsky, Andrey 
Sent: Saturday, November 9, 2019 3:01 AM
To: Koenig, Christian ; Deng, Emily
; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr


On 11/8/19 5:35 AM, Koenig, Christian wrote:

Hi Emily,

exactly that can't happen. See here:


      /* Don't destroy jobs while the timeout worker is running
*/
      if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
      !cancel_delayed_work(&sched->work_tdr))
      return NULL;

We never free jobs while the timeout working is running to prevent
exactly that issue.


I don't think this protects us if drm_sched_cleanup_jobs is called for
scheduler which didn't experience a timeout, in
amdgpu_device_gpu_recover we access
sched->ring_mirror_list for all the schedulers on a device so this
sched->condition
above won't protect us. What in fact could help maybe is my recent
patch
541c521 drm/sched: Avoid job cleanup if sched thread is parked. because
we do park each of the scheduler threads during tdr job before trying
to access
sched->ring_mirror_list.

Emily - did you see this problem with that patch in place ? I only
pushed it yesterday.

Andrey



Regards,
Christian.

Am 08.11.19 um 11:32 schrieb Deng, Emily:

Hi Christian,
The drm_sched_job_timedout-> amdgpu_job_timedout call

amdgpu_device_gpu_recover. I mean the main scheduler free the jobs
while in amdgpu_device_gpu_recover, and before calling drm_sched_stop.

Best wishes
Emily Deng




-Original Message-
From: Koenig, Christian 
Sent: Friday, November 8, 2019 6:26 PM
To: Deng, Emily ; amd-

g...@lists.freedesktop.org

Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

Hi Emily,

well who is calling amdgpu_device_gpu_recover() in this case?

When it's not the scheduler we shouldn't have a guilty job in the first

place.

Regards,
Christian.

Am 08.11.19 um 11:22 schrieb Deng, Emily:

Hi Christian,
 No, I am on the new branch and also have the patch. Even if they are freed by

the main scheduler, how could we prevent the main scheduler from freeing jobs
while we are inside the function amdgpu_device_gpu_recover?

Best wishes
Emily Deng




-Original Message-
From: Koenig, Christian 
Sent: Friday, November 8, 2019 6:15 PM
To: Deng, Emily ;
amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for
tdr

Hi Emily,

in this case you are on an old code branch.

Jobs are freed now by the main scheduler thread and only if no
timeout handler is running.

See this patch here:

commit 5918045c4ed492fb5813f980dcf89a90fefd0a4e
Author: Christian König 
Date:   Thu Apr 18 11:00:21 2019 -0400

    drm/scheduler: rework job destruction

Regards,
Christian.

Am 08.11.19 um 11:11 schrieb Deng, Emily:

Hi Christian,
  Please refer to follow log, when it enter to
amdgpu_device_gpu_recover

function, the bad job 5086879e is freeing in function
amdgpu_job_free_cb  at the same time, because of the hardware
fence

signal.

But amdgpu_device_gpu_recover goes faster, at this case, the
s_fence is already freed, but job is not freed in time. Then this
issue

occurs.

[  449.792189] [drm:amdgpu_job_timedout [amdgpu]] *ERROR* ring

sdma0

timeout, signaled seq=2481, emitted seq=2483 [  449.793202]
[drm:amdgpu_job_timedout [amdgpu]] *ERROR* Process

information:

process  pid 0 thread  pid 0, s_job:5086879e [
449.794163] amdgpu
:00:08.0: GPU reset begin!

[  449.794175] Emily:amdgpu_job_free_cb,Process information:
process pid 0 thread  pid 0, s_job:5086879e [
449.794221] Emily:amdgpu_job_free_cb,Process information:
process pid 0 thread pid 0, s_job:66eb74ab [
449.794222] Emil

Re: [PATCH] drm/amd/display: Use pixel encoding 444 for dongle usb-c to hdmi

2019-11-11 Thread Harry Wentland
On 2019-10-08 2:15 p.m., Julien Isorce wrote:
> Hi Harry,
> 
> I can reproduce on LG, Samsung and NEC monitors.
> 
> "Have you checked whether the driver picks RGB or YCBCR420 without your
> patch?" -> it was selecting RGB .
> 
> For example on https://commons.wikimedia.org/wiki/File:Gray_scale.jpg ,
> the second band from the left, will be entirely pinkish.
> The issue also happens without a dongle, i.e. with a direct cable from
> the miniDP on the graphics card to the DisplayPort on the screen, so I think
> there is a more serious issue with RGB output in amdgpu. But it is not
> easy to reproduce; you should try with the above image.
> 

I haven't had time to repro this issue. Can you post a picture of this
problem somewhere? Ideally with a bug description at
https://gitlab.freedesktop.org/drm/amd/issues

> In any case, the goal with the patch is just to get the same output when
> using 2 screens at the same time, one connected to hdmi output of the
> graphic card and one connected  to usb-c to graphic card (hdmi cable
> with dongle). So prior this patch, the first one would use YCbCr 444 and
> the second would use RGB.
> After this patch, both will use YCbCr 444 (both are hdmi).

I've been hesitant about this patch since it changes driver policy which
is not something I like to do without very good reason and understanding
all the implications.

That said, treating an DP-HDMI adapter like a native HDMI connection
rather than DP is not unreasonable. I'm still curious, though, why this
is required at all. As mentioned above a picture of the problem (ideally
showing the monitors side-by-side) would help.

Harry


> The patch does not change the case for miniDP to DisplayPort, the driver
> will still use RGB. Because maybe the RGB issue is also specific to that
> graphic card which
> is VEGA"M". So that is why the patch only tries to match hdmi cases
> together, whether it is direct connection or through usb-c.
> 
> -
> Julien
> 
> 
> 
> On Tue, Oct 8, 2019 at 10:44 AM Harry Wentland  > wrote:
> 
> Hi Julien,
> 
> curious which monitor you're using.
> 
> Have you checked whether the driver picks RGB or YCBCR420 without your
> patch?
> 
> I'm not sure I understand how the pinkish color issue looks. Do you see
> a pinkish color at the transition from grey to another color? Or is the
> entire grey area pinkish?
> 
> Thanks,
> Harry
> 
> On 2019-10-08 12:06 p.m., Julien Isorce wrote:
> > Hi,
> >
> > Gentle ping ?
> >
> > Thx
> > Julien
> >
> > On Tue, Oct 1, 2019 at 3:21 PM Julien Isorce
> mailto:julien.iso...@gmail.com>
> > >>
> wrote:
> >
> >     Fix pinkish color issue around grey areas. This also happens
> >     when not using any dongle so directly with a usb-c to Display
> >     Port cable. Meaning there is something wrong when using pixel
> >     encoding RGB with amd driver in the general case. In the meantime
> >     just use the same pixel encoding as when using HDMI without
> dongle.
> >     This way users will see the same thing on 2 identical screens when
> >     one is connected with hdmi-to-hdmi and the other is connected with
> >     usb-c-to-hdmi.
> >
> >     Signed-off-by: Julien Isorce  
> >     >>
> >     ---
> >      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 +
> >      1 file changed, 5 insertions(+)
> >
> >     diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> >     b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> >     index d3f404f097eb..8139dcc0bfba 100644
> >     --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> >     +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> >     @@ -3313,6 +3313,7 @@ static void
> >     fill_stream_properties_from_drm_display_mode(
> >      {
> >             struct dc_crtc_timing *timing_out = &stream->timing;
> >             const struct drm_display_info *info =
> &connector->display_info;
> >     +       const struct dc_link *link = stream->sink->link;
> >
> >             memset(timing_out, 0, sizeof(struct dc_crtc_timing));
> >
> >     @@ -3327,6 +3328,10 @@ static void
> >     fill_stream_properties_from_drm_display_mode(
> >             else if ((connector->display_info.color_formats &
> >     DRM_COLOR_FORMAT_YCRCB444)
> >                             && stream->signal ==
> SIGNAL_TYPE_HDMI_TYPE_A)
> >                     timing_out->pixel_encoding =
> PIXEL_ENCODING_YCBCR444;
> >     +       else if ((connector->display_info.color_formats &
> >     DRM_COLOR_FORMAT_YCRCB444)
> >     +                       && stream->sink->sink_signal ==
> >     SIGNAL_TYPE_DISPLAY_PORT
> >     +     

Re: [PATCH 2/2] drm/amdkfd: Avoid using doorbell_off as offset in process doorbell pages

2019-11-11 Thread Felix Kuehling

On 2019-11-11 15:43, Felix Kuehling wrote:

On 2019-11-01 16:10, Zhao, Yong wrote:

doorbell_off in the queue properties is mainly used for the doorbell dw
offset in pci bar. We should not set it to the doorbell byte offset in
process doorbell pages. This makes the code much easier to read.


I kind of agree. I think what's confusing is that the queue_properties 
structure is used for two different purposes.


 1. For storing queue properties provided by user mode through KFD ioctls
 2. A subset of struct queue passed to mqd_manager and elsewhere
(that's why some driver state is creeping into it)

Maybe a follow-up could cleanly separate the queue properties from the 
queue driver state. That would probably change some internal 
interfaces to use struct queue instead of queue_properties.


Anyway, this patch is

Reviewed-by: Felix Kuehling 

I pointed out a missing NULL pointer check inline near the end of the 
patch. I should have mentioned it here. Please fix that before you submit.


Thanks,
  Felix



Change-Id: I553045ff9fcb3676900c92d10426f2ceb3660005
Signed-off-by: Yong Zhao
---
  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 12 ++--
  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c  |  2 +-
  drivers/gpu/drm/amd/amdkfd/kfd_priv.h|  3 ++-
  .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c   |  8 ++--
  4 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index d9e36dbf13d5..b91993753b82 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -258,6 +258,7 @@ static int kfd_ioctl_create_queue(struct file *filep, 
struct kfd_process *p,
unsigned int queue_id;
struct kfd_process_device *pdd;
struct queue_properties q_properties;
+   uint32_t doorbell_offset_in_process = 0;
  
  	memset(&q_properties, 0, sizeof(struct queue_properties));
  
@@ -286,7 +287,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,

p->pasid,
dev->id);
  
-	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);

+   err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
+   &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
  
@@ -298,12 +300,10 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,

args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
args->doorbell_offset <<= PAGE_SHIFT;
if (KFD_IS_SOC15(dev->device_info->asic_family))
-   /* On SOC15 ASICs, doorbell allocation must be
-* per-device, and independent from the per-process
-* queue_id. Return the doorbell offset within the
-* doorbell aperture to user mode.
+   /* On SOC15 ASICs, include the doorbell offset within the
+* process doorbell frame, which could be 1 page or 2 pages.
 */
-   args->doorbell_offset |= q_properties.doorbell_off;
+   args->doorbell_offset |= doorbell_offset_in_process;
  
  	mutex_unlock(&p->mutex);
  
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c

index d59f2cd056c6..1d33c4f25263 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -185,7 +185,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
properties.type = KFD_QUEUE_TYPE_DIQ;
  
  	status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,

-   &properties, &qid);
+   &properties, &qid, NULL);
  
  	if (status) {

pr_err("Failed to create DIQ\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 7c561c98f2e2..66bae8f2dad1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -907,7 +907,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
-   unsigned int *qid);
+   unsigned int *qid,
+   uint32_t *p_doorbell_offset_in_process);
  int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
  int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 8509814a6ff0..48185d2957e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager

[PATCH] drm/amdgpu/vcn: finish delay work before release resources

2019-11-11 Thread Alex Deucher
flush/cancel delayed works before doing finalization
to avoid concurrently requests.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 3199e4a5ff12..9d870444d7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -193,6 +193,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 {
int i, j;
 
+   cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
  &adev->vcn.dpg_sram_gpu_addr,
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 2/2] drm/amdkfd: Avoid using doorbell_off as offset in process doorbell pages

2019-11-11 Thread Felix Kuehling

On 2019-11-01 16:10, Zhao, Yong wrote:

doorbell_off in the queue properties is mainly used for the doorbell dw
offset in pci bar. We should not set it to the doorbell byte offset in
process doorbell pages. This makes the code much easier to read.


I kind of agree. I think what's confusing is that the queue_properties 
structure is used for two different purposes.


1. For storing queue properties provided by user mode through KFD ioctls
2. A subset of struct queue passed to mqd_manager and elsewhere (that's
   why some driver state is creeping into it)

Maybe a follow-up could cleanly separate the queue properties from the 
queue driver state. That would probably change some internal interfaces 
to use struct queue instead of queue_properties.


Anyway, this patch is

Reviewed-by: Felix Kuehling 


Change-Id: I553045ff9fcb3676900c92d10426f2ceb3660005
Signed-off-by: Yong Zhao 
---
  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 12 ++--
  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c  |  2 +-
  drivers/gpu/drm/amd/amdkfd/kfd_priv.h|  3 ++-
  .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c   |  8 ++--
  4 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index d9e36dbf13d5..b91993753b82 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -258,6 +258,7 @@ static int kfd_ioctl_create_queue(struct file *filep, 
struct kfd_process *p,
unsigned int queue_id;
struct kfd_process_device *pdd;
struct queue_properties q_properties;
+   uint32_t doorbell_offset_in_process = 0;
  
  	memset(&q_properties, 0, sizeof(struct queue_properties));
  
@@ -286,7 +287,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,

p->pasid,
dev->id);
  
-	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);

+   err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
+   &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
  
@@ -298,12 +300,10 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,

args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
args->doorbell_offset <<= PAGE_SHIFT;
if (KFD_IS_SOC15(dev->device_info->asic_family))
-   /* On SOC15 ASICs, doorbell allocation must be
-* per-device, and independent from the per-process
-* queue_id. Return the doorbell offset within the
-* doorbell aperture to user mode.
+   /* On SOC15 ASICs, include the doorbell offset within the
+* process doorbell frame, which could be 1 page or 2 pages.
 */
-   args->doorbell_offset |= q_properties.doorbell_off;
+   args->doorbell_offset |= doorbell_offset_in_process;
  
  	mutex_unlock(&p->mutex);
  
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c

index d59f2cd056c6..1d33c4f25263 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -185,7 +185,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
properties.type = KFD_QUEUE_TYPE_DIQ;
  
  	status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,

-   &properties, &qid);
+   &properties, &qid, NULL);
  
  	if (status) {

pr_err("Failed to create DIQ\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 7c561c98f2e2..66bae8f2dad1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -907,7 +907,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
-   unsigned int *qid);
+   unsigned int *qid,
+   uint32_t *p_doorbell_offset_in_process);
  int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
  int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 8509814a6ff0..48185d2957e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -192,7 +192,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue

Re: [PATCH 00/14] HDCP 2 Content Protection v2

2019-11-11 Thread Harry Wentland
On 2019-11-07 10:56 a.m., Bhawanpreet Lakha wrote:
> Just like with the 1.4 series of patches This only introduces the
> ability to authenticate and encrypt the link. These patches by
> themselves don't constitute a complete and compliant HDCP content
> protection solution but are a requirement for such a solution.
> 
> Summary of the changes
> *Adds 2.2 code to the module
> *Enabled HDCP 2.2 authentication/encryption
> *Add type0/1 selection for 2.2
> *Add MST support (Only tested single daisy chain usecase)
> *use drm_hdcp.h for macros/defines
> *fix static analysis bug
> 
> v2:
>   -use macros/defines from drm_hdcp.h
>   drm/amd/display: add and use defines from drm_hdcp.h
>   drm/amd/display: use drm defines for MAX CASCADE MASK
>   drm/amd/display: split rxstatus for hdmi and dp
>   -fix static analysis bug
>   drm/amd/display: Fix static analysis bug in validate_bksv
> 

Changes are
Reviewed-by: Harry Wentland 

Harry

> 
> Bhawanpreet Lakha (14):
>   drm/amd/display: Add PSP block to verify HDCP2.2 steps
>   drm/amd/display: Add DDC handles for HDCP2.2
>   drm/amd/display: Add execution and transition states for HDCP2.2
>   drm/amd/display: Add logging for HDCP2.2
>   drm/amd/display: Change ERROR to WARN for HDCP module
>   drm/amd/display: Enable HDCP 2.2
>   drm/amd/display: Handle hdcp2.2 type0/1 in dm
>   drm/amd/display: Refactor HDCP to handle multiple displays per link
>   drm/amd/display: add force Type0/1 flag
>   drm/amd/display: Refactor HDCP encryption status update
>   drm/amd/display: add and use defines from drm_hdcp.h
>   drm/amd/display: use drm defines for MAX CASCADE MASK
>   drm/amd/display: split rxstatus for hdmi and dp
>   drm/amd/display: Fix static analysis bug in validate_bksv
> 
>  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  26 +-
>  .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c|  64 +-
>  .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.h|   9 +-
>  .../gpu/drm/amd/display/modules/hdcp/Makefile |   3 +-
>  .../gpu/drm/amd/display/modules/hdcp/hdcp.c   | 101 +-
>  .../gpu/drm/amd/display/modules/hdcp/hdcp.h   | 197 +++-
>  .../display/modules/hdcp/hdcp1_execution.c|  40 +-
>  .../display/modules/hdcp/hdcp2_execution.c| 884 ++
>  .../display/modules/hdcp/hdcp2_transition.c   | 674 +
>  .../drm/amd/display/modules/hdcp/hdcp_ddc.c   | 326 +++
>  .../drm/amd/display/modules/hdcp/hdcp_log.c   | 118 +++
>  .../drm/amd/display/modules/hdcp/hdcp_log.h   |  98 +-
>  .../drm/amd/display/modules/hdcp/hdcp_psp.c   | 511 +-
>  .../drm/amd/display/modules/hdcp/hdcp_psp.h   | 194 
>  .../drm/amd/display/modules/inc/mod_hdcp.h|  15 +-
>  15 files changed, 3137 insertions(+), 123 deletions(-)
>  create mode 100644 drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
>  create mode 100644 
> drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/radeon: Clean up code in radeon_pci_shutdown()

2019-11-11 Thread Alex Deucher
On Mon, Nov 11, 2019 at 3:29 PM Kyle Mahlkuch
 wrote:
>
> From: KyleMahlkuch 
>
> This fixes the formatting on one comment and consolidates the
> pci_get_drvdata() into the radeon_suspend_kms().
>
> Signed-off-by: Kyle Mahlkuch 

Applied.  Thanks!

Alex

> ---
>  drivers/gpu/drm/radeon/radeon_drv.c | 9 +++--
>  1 file changed, 3 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/radeon/radeon_drv.c 
> b/drivers/gpu/drm/radeon/radeon_drv.c
> index 4528f4d..357d29a 100644
> --- a/drivers/gpu/drm/radeon/radeon_drv.c
> +++ b/drivers/gpu/drm/radeon/radeon_drv.c
> @@ -379,10 +379,6 @@ static int radeon_pci_probe(struct pci_dev *pdev,
>  static void
>  radeon_pci_shutdown(struct pci_dev *pdev)
>  {
> -#ifdef CONFIG_PPC64
> -   struct drm_device *ddev = pci_get_drvdata(pdev);
> -#endif
> -
> /* if we are running in a VM, make sure the device
>  * torn down properly on reboot/shutdown
>  */
> @@ -390,13 +386,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,
> radeon_pci_remove(pdev);
>
>  #ifdef CONFIG_PPC64
> -   /* Some adapters need to be suspended before a
> +   /*
> +* Some adapters need to be suspended before a
>  * shutdown occurs in order to prevent an error
>  * during kexec.
>  * Make this power specific becauase it breaks
>  * some non-power boards.
>  */
> -   radeon_suspend_kms(ddev, true, true, false);
> +   radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
>  #endif
>  }
>
> --
> 1.8.3.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 1/2] drm/amdkfd: Use better name to indicate the offset is in dwords

2019-11-11 Thread Felix Kuehling

On 2019-11-01 16:10, Zhao, Yong wrote:

Change-Id: I75da23bba90231762cf58da3170f5bb77ece45ed
Signed-off-by: Yong Zhao 


I agree with the name changes. One suggestion for a comment inline. With 
that fixed, this patch is


Reviewed-by: Felix Kuehling 



---
  .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  |  2 +-
  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c  | 14 +++---
  drivers/gpu/drm/amd/amdkfd/kfd_priv.h  |  8 
  3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 984c2f2b24b6..4503fb26fe5b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -170,7 +170,7 @@ static int allocate_doorbell(struct qcm_process_device 
*qpd, struct queue *q)
}
  
  	q->properties.doorbell_off =

-   kfd_doorbell_id_to_offset(dev, q->process,
+   kfd_get_doorbell_dw_offset_from_bar(dev, q->process,
  q->doorbell_id);
  
  	return 0;

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index ebe79bf00145..f904355c44a1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -91,7 +91,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
doorbell_start_offset;
  
-	kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);

+   kfd->doorbell_base_dw_offset = doorbell_start_offset / sizeof(u32);
  
  	kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,

   kfd_doorbell_process_slice(kfd));
@@ -103,8 +103,8 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
pr_debug("doorbell base   == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
  
-	pr_debug("doorbell_id_offset  == 0x%08lX\n",

-   kfd->doorbell_id_offset);
+   pr_debug("doorbell_base_dw_offset  == 0x%08lX\n",
+   kfd->doorbell_base_dw_offset);
  
  	pr_debug("doorbell_process_limit  == 0x%08lX\n",

doorbell_process_limit);
@@ -185,7 +185,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 * Calculating the kernel doorbell offset using the first
 * doorbell page.
 */
-   *doorbell_off = kfd->doorbell_id_offset + inx;
+   *doorbell_off = kfd->doorbell_base_dw_offset + inx;
  
  	pr_debug("Get kernel queue doorbell\n"

" doorbell offset   == 0x%08X\n"
@@ -225,17 +225,17 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
}
  }
  
-unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,

+unsigned int kfd_get_doorbell_dw_offset_from_bar(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int doorbell_id)
  {
/*
-* doorbell_id_offset accounts for doorbells taken by KGD.
+* doorbell_base_dw_offset accounts for doorbells taken by KGD.
 * index * kfd_doorbell_process_slice/sizeof(u32) adjusts to
 * the process's doorbells. The offset returned is in dword
 * units regardless of the ASIC-dependent doorbell size.
 */
-   return kfd->doorbell_id_offset +
+   return kfd->doorbell_base_dw_offset +
process->doorbell_index
* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 62db4d20ed32..7c561c98f2e2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -238,9 +238,9 @@ struct kfd_dev {
 * KFD. It is aligned for mapping
 * into user mode
 */
-   size_t doorbell_id_offset;  /* Doorbell offset (from KFD doorbell
-* to HW doorbell, GFX reserved some
-* at the start)
+   size_t doorbell_base_dw_offset; /* Doorbell dword offset (from KFD
+* doorbell to PCI doorbell bar,
+* GFX reserved some at the start)


This is still a bit convoluted and sounds backwards. I suggest this wording:

    Offset from the start of the PCI doorbell BAR to the first KFD 
doorbell in dwords


Regards,
  Felix


 */
u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
   * p

[PATCH] drm/radeon: Clean up code in radeon_pci_shutdown()

2019-11-11 Thread Kyle Mahlkuch
From: KyleMahlkuch 

This fixes the formatting on one comment and consolidates the
pci_get_drvdata() into the radeon_suspend_kms().

Signed-off-by: Kyle Mahlkuch 
---
 drivers/gpu/drm/radeon/radeon_drv.c | 9 +++--
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_drv.c 
b/drivers/gpu/drm/radeon/radeon_drv.c
index 4528f4d..357d29a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -379,10 +379,6 @@ static int radeon_pci_probe(struct pci_dev *pdev,
 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PPC64
-   struct drm_device *ddev = pci_get_drvdata(pdev);
-#endif
-
/* if we are running in a VM, make sure the device
 * torn down properly on reboot/shutdown
 */
@@ -390,13 +386,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,
radeon_pci_remove(pdev);
 
 #ifdef CONFIG_PPC64
-   /* Some adapters need to be suspended before a
+   /*
+* Some adapters need to be suspended before a
 * shutdown occurs in order to prevent an error
 * during kexec.
 * Make this power specific becauase it breaks
 * some non-power boards.
 */
-   radeon_suspend_kms(ddev, true, true, false);
+   radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
 #endif
 }
 
-- 
1.8.3.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH -next] drm/amd/display: remove set but not used variable 'bpc'

2019-11-11 Thread Alex Deucher
On Mon, Nov 11, 2019 at 1:01 PM Alex Deucher  wrote:
>
> Applied.  Thanks!

I've dropped this as it leads to a warning in the code since
get_color_depth is no longer used.  Care to fix that up as well?

Thanks!

Alex

>
> Alex
>
> On Sun, Nov 10, 2019 at 9:30 PM YueHaibing  wrote:
> >
> > Fixes gcc '-Wunused-but-set-variable' warning:
> >
> > drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c: In function 
> > get_pbn_from_timing:
> > drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c:2364:11: warning:
> >  variable bpc set but not used [-Wunused-but-set-variable]
> >
> > It is not used since commit e49f69363adf ("drm/amd/display: use
> > proper formula to calculate bandwidth from timing")
> >
> > Signed-off-by: YueHaibing 
> > ---
> >  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 --
> >  1 file changed, 2 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
> > b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> > index bdc8be3..53394e2 100644
> > --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> > +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> > @@ -2653,13 +2653,11 @@ static int get_color_depth(enum dc_color_depth 
> > color_depth)
> >
> >  static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
> >  {
> > -   uint32_t bpc;
> > uint64_t kbps;
> > struct fixed31_32 peak_kbps;
> > uint32_t numerator;
> > uint32_t denominator;
> >
> > -   bpc = 
> > get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
> > kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
> >
> > /*
> > --
> > 2.7.4
> >
> >
> > ___
> > dri-devel mailing list
> > dri-de...@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amdkfd: Rename create_cp_queue() to init_user_queue()

2019-11-11 Thread Felix Kuehling

On 2019-11-01 16:12, Zhao, Yong wrote:

create_cp_queue() could also work with SDMA queues, so we should rename
it.

Change-Id: I76cbaed8fa95dd9062d786cbc1dd037ff041da9d
Signed-off-by: Yong Zhao 


The name change makes sense. This patch is

Reviewed-by: Felix Kuehling 



---
  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 6 +++---
  1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 48185d2957e9..ebb2f69b438c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -162,7 +162,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
pqm->queue_slot_bitmap = NULL;
  }
  
-static int create_cp_queue(struct process_queue_manager *pqm,

+static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev, struct queue **q,
struct queue_properties *q_properties,
struct file *f, unsigned int qid)
@@ -251,7 +251,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
  
-		retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);

+   retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -272,7 +272,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
  
-		retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);

+   retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/display: Fix unsigned variable compared to less than zero

2019-11-11 Thread Gustavo A. R. Silva


On 11/11/19 11:46, Mikita Lipski wrote:
> 
> Thanks for catching it!
> 

Glad to help out. :)

> Reviewed-by: Mikita Lipski 
> 

Thanks
--
Gustavo

> 
> On 11.11.2019 12:25, Gustavo A. R. Silva wrote:
>> Currently, the error check below on variable *vcpi_slots* is always
>> false because it is a uint64_t type variable, hence, the values
>> this variable can hold are never less than zero:
>>
>> drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c:
>> 4870 if (dm_new_connector_state->vcpi_slots < 0) {
>> 4871 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", 
>> (int)dm_new_connector_stat e->vcpi_slots);
>> 4872 return dm_new_connector_state->vcpi_slots;
>> 4873 }
>>
>> Fix this by making *vcpi_slots* of type int.
>>
>> Addresses-Coverity: 1487838 ("Unsigned compared against 0")
>> Fixes: b4c578f08378 ("drm/amd/display: Add MST atomic routines")
>> Signed-off-by: Gustavo A. R. Silva
>> ---
>>   drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +-
>>   1 file changed, 1 insertion(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
>> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
>> index 6db07e9e33ab..a8fc90a927d6 100644
>> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
>> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
>> @@ -403,7 +403,7 @@ struct dm_connector_state {
>>   bool underscan_enable;
>>   bool freesync_capable;
>>   uint8_t abm_level;
>> -    uint64_t vcpi_slots;
>> +    int vcpi_slots;
>>   uint64_t pbn;
>>   };
>>   -- 2.23.0
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH v2 0/2] drm: replace magic numbers

2019-11-11 Thread Bjorn Helgaas
From: Bjorn Helgaas 

amdgpu and radeon do a bit of mucking with the PCIe Link Control 2
register, some of it using hard-coded magic numbers.  The idea here is to
replace those with #defines.

I don't intend the Target Link Speed patch to change anything, so it should
be straightforward to review.

Since v1:
  - Add my signed-off-by and Alex's reviewed-by.

Bjorn Helgaas (2):
  drm: replace incorrect Compliance/Margin magic numbers with
PCI_EXP_LNKCTL2 definitions
  drm: replace Target Link Speed magic numbers with PCI_EXP_LNKCTL2
definitions

 drivers/gpu/drm/amd/amdgpu/cik.c | 22 ++
 drivers/gpu/drm/amd/amdgpu/si.c  | 18 +++---
 drivers/gpu/drm/radeon/cik.c | 22 ++
 drivers/gpu/drm/radeon/si.c  | 22 ++
 include/uapi/linux/pci_regs.h|  2 ++
 5 files changed, 55 insertions(+), 31 deletions(-)

-- 
2.24.0.rc1.363.gb1bccd3e3d-goog

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 1/2] drm: replace incorrect Compliance/Margin magic numbers with PCI_EXP_LNKCTL2 definitions

2019-11-11 Thread Bjorn Helgaas
From: Bjorn Helgaas 

Add definitions for these PCIe Link Control 2 register fields:

  Enter Compliance
  Transmit Margin

and use them in amdgpu and radeon.

NOTE: This is a functional change because "7 << 9" was apparently a typo.
That mask included the high order bit of Transmit Margin, the Enter
Modified Compliance bit, and the Compliance SOS bit, but I think what
was intended was the 3-bit Transmit Margin field at bits 9:7.

Signed-off-by: Bjorn Helgaas 
Reviewed-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/cik.c | 14 ++
 drivers/gpu/drm/amd/amdgpu/si.c  | 10 +++---
 drivers/gpu/drm/radeon/cik.c | 14 ++
 drivers/gpu/drm/radeon/si.c  | 14 ++
 include/uapi/linux/pci_regs.h|  2 ++
 5 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index b81bb414fcb3..e4a595cdd4c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1498,13 +1498,19 @@ static void cik_pcie_gen3_enable(struct amdgpu_device 
*adev)
 
/* linkctl2 */
pci_read_config_word(root, bridge_pos + 
PCI_EXP_LNKCTL2, &tmp16);
-   tmp16 &= ~((1 << 4) | (7 << 9));
-   tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+   tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+  PCI_EXP_LNKCTL2_TX_MARGIN);
+   tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+  PCI_EXP_LNKCTL2_TX_MARGIN));
pci_write_config_word(root, bridge_pos + 
PCI_EXP_LNKCTL2, tmp16);
 
pci_read_config_word(adev->pdev, gpu_pos + 
PCI_EXP_LNKCTL2, &tmp16);
-   tmp16 &= ~((1 << 4) | (7 << 9));
-   tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+   tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+  PCI_EXP_LNKCTL2_TX_MARGIN);
+   tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+  PCI_EXP_LNKCTL2_TX_MARGIN));
pci_write_config_word(adev->pdev, gpu_pos + 
PCI_EXP_LNKCTL2, tmp16);
 
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 493af42152f2..cf543410a424 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1737,12 +1737,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device 
*adev)
pci_write_config_word(adev->pdev, gpu_pos + 
PCI_EXP_LNKCTL, tmp16);
 
pci_read_config_word(root, bridge_pos + 
PCI_EXP_LNKCTL2, &tmp16);
-   tmp16 &= ~((1 << 4) | (7 << 9));
-   tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+   tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+  PCI_EXP_LNKCTL2_TX_MARGIN);
+   tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+  PCI_EXP_LNKCTL2_TX_MARGIN));
pci_write_config_word(root, bridge_pos + 
PCI_EXP_LNKCTL2, tmp16);
 
pci_read_config_word(adev->pdev, gpu_pos + 
PCI_EXP_LNKCTL2, &tmp16);
-   tmp16 &= ~((1 << 4) | (7 << 9));
+   tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+  PCI_EXP_LNKCTL2_TX_MARGIN);
tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
pci_write_config_word(adev->pdev, gpu_pos + 
PCI_EXP_LNKCTL2, tmp16);
 
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 62eab82a64f9..95ffa0bff2d8 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9619,13 +9619,19 @@ static void cik_pcie_gen3_enable(struct radeon_device 
*rdev)
 
/* linkctl2 */
pci_read_config_word(root, bridge_pos + 
PCI_EXP_LNKCTL2, &tmp16);
-   tmp16 &= ~((1 << 4) | (7 << 9));
-   tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+   tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+  PCI_EXP_LNKCTL2_TX_MARGIN);
+   tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+  

[PATCH 2/2] drm: replace Target Link Speed magic numbers with PCI_EXP_LNKCTL2 definitions

2019-11-11 Thread Bjorn Helgaas
From: Bjorn Helgaas 

Replace hard-coded magic numbers with the descriptive PCI_EXP_LNKCTL2
definitions.  No functional change intended.

Signed-off-by: Bjorn Helgaas 
Reviewed-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/cik.c | 8 
 drivers/gpu/drm/amd/amdgpu/si.c  | 8 
 drivers/gpu/drm/radeon/cik.c | 8 
 drivers/gpu/drm/radeon/si.c  | 8 
 4 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index e4a595cdd4c1..3067bb874032 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1527,13 +1527,13 @@ static void cik_pcie_gen3_enable(struct amdgpu_device 
*adev)
WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
 
pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-   tmp16 &= ~0xf;
+   tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-   tmp16 |= 3; /* gen3 */
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
-   tmp16 |= 2; /* gen2 */
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
-   tmp16 |= 1; /* gen1 */
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
 
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index cf543410a424..d5c83d82063b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1762,13 +1762,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device 
*adev)
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-   tmp16 &= ~0xf;
+   tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-   tmp16 |= 3;
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
-   tmp16 |= 2;
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
-   tmp16 |= 1;
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
 
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 95ffa0bff2d8..a280442c81aa 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9647,13 +9647,13 @@ static void cik_pcie_gen3_enable(struct radeon_device 
*rdev)
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-   tmp16 &= ~0xf;
+   tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
-   tmp16 |= 3; /* gen3 */
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
-   tmp16 |= 2; /* gen2 */
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
-   tmp16 |= 1; /* gen1 */
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
 
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 69993d34d1e9..529e70a42019 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7230,13 +7230,13 @@ static void si_pcie_gen3_enable(struct radeon_device 
*rdev)
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-   tmp16 &= ~0xf;
+   tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
-   tmp16 |= 3; /* gen3 */
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
-   tmp16 |= 2; /* gen2 */
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
-   tmp16 |= 1; /* gen1 */
+   tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
 
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
-- 
2.24.0.rc1.363.gb1bccd3e3d-goog

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH v2] drm/amdgpu/smu_v11: Unify and fix power limits

2019-11-11 Thread Matt Coffin
[Why]
On Navi10, and presumably Arcturus, updating pp_table via sysfs would
not re-scale the maximum possible power limit one can set. On navi10,
the SMU code ignored the power percentage overdrive setting entirely,
and would not allow you to exceed the default power limit at all.

[How]
Adding a function to the SMU interface to get the pptable version of the
default power limit allows ASIC-specific code to provide the correct
maximum-settable power limit for the current pptable.

Signed-off-by: Matt Coffin 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 12 +-
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 22 +-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  4 +-
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 +
 .../drm/amd/powerplay/inc/smu_v11_0_pptable.h |  2 +
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c| 22 +-
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 40 +--
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c|  1 -
 8 files changed, 77 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 66faea66a8e9..43862afdbe27 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1107,7 +1107,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
 
-   ret = smu_get_power_limit(smu, &smu->default_power_limit, true, 
false);
+   ret = smu_get_power_limit(smu, &smu->default_power_limit, 
false, false);
if (ret)
return ret;
}
@@ -2509,3 +2509,13 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
 
return ret;
 }
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
+{
+   uint32_t ret = 0;
+
+   if (smu->ppt_funcs->get_pptable_power_limit)
+   ret = smu->ppt_funcs->get_pptable_power_limit(smu);
+
+   return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 3099ac256bd3..ebb9c8064867 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -1261,15 +1261,14 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu,
 
 static int arcturus_get_power_limit(struct smu_context *smu,
 uint32_t *limit,
-bool asic_default)
+bool cap)
 {
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
 
-   if (!smu->default_power_limit ||
-   !smu->power_limit) {
+   if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, 
SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1292,17 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context 
*smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
 
-   if (smu->od_enabled) {
-   asic_default_power_limit *= (100 + 
smu->smu_table.TDPODLimit);
-   asic_default_power_limit /= 100;
-   }
-
-   smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
 
-   if (asic_default)
-   *limit = smu->default_power_limit;
+   if (cap)
+   *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
 
@@ -2070,6 +2063,12 @@ static void arcturus_i2c_eeprom_control_fini(struct 
i2c_adapter *control)
i2c_del_adapter(control);
 }
 
+static uint32_t arcterus_get_pptable_power_limit(struct smu_context *smu)
+{
+   PPTable_t *pptable = smu->smu_table.driver_pptable;
+   return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2160,6 +2159,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+   .get_pptable_power_limit = arcterus_get_pptable_power_limit,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 8120e7587585..999445c5c010 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -261,7 +261,6 @

[PATCH v2 3/3] drm/amdgpu/navi10: Implement od clk printing

2019-11-11 Thread Matt Coffin
[Why]
Before this patch, navi10 overdrive settings could not be printed via
pp_od_clk_voltage

[How]
Implement printing for the overdrive settings for the following clocks
in navi10's ppt print_clk_levels implementation:

* SMU_OD_SCLK
* SMU_OD_MCLK
* SMU_OD_VDDC_CURVE

Signed-off-by: Matt Coffin 
---
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 56 --
 1 file changed, 51 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 3e26c03d5eed..4fbdf0e507f3 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -691,13 +691,25 @@ static bool navi10_is_support_fine_grained_dpm(struct 
smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
 }
 
+static inline bool navi10_od_feature_is_supported(struct 
smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+{
+   return od_table->cap[feature];
+}
+
+
 static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
 {
+   OverDriveTable_t *od_table;
+   struct smu_11_0_overdrive_table *od_settings;
+   uint16_t *curve_settings;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
+   struct smu_table_context *table_context = &smu->smu_table;
+   od_table = (OverDriveTable_t *)table_context->overdrive_table;
+   od_settings = smu->od_settings;
 
switch (clk_type) {
case SMU_GFXCLK:
@@ -748,6 +760,45 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 
}
break;
+   case SMU_OD_SCLK:
+   if (!smu->od_enabled || !od_table || !od_settings)
+   break;
+   if (!navi10_od_feature_is_supported(od_settings, 
SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
+   break;
+   size += sprintf(buf + size, "OD_SCLK:\n");
+   size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", 
od_table->GfxclkFmin, od_table->GfxclkFmax);
+   break;
+   case SMU_OD_MCLK:
+   if (!smu->od_enabled || !od_table || !od_settings)
+   break;
+   if (!navi10_od_feature_is_supported(od_settings, 
SMU_11_0_ODFEATURE_UCLK_MAX))
+   break;
+   size += sprintf(buf + size, "OD_MCLK:\n");
+   size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
+   break;
+   case SMU_OD_VDDC_CURVE:
+   if (!smu->od_enabled || !od_table || !od_settings)
+   break;
+   if (!navi10_od_feature_is_supported(od_settings, 
SMU_11_0_ODFEATURE_GFXCLK_CURVE))
+   break;
+   size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
+   for (i = 0; i < 3; i++) {
+   switch (i) {
+   case 0:
+   curve_settings = &od_table->GfxclkFreq1;
+   break;
+   case 1:
+   curve_settings = &od_table->GfxclkFreq2;
+   break;
+   case 2:
+   curve_settings = &od_table->GfxclkFreq3;
+   break;
+   default:
+   break;
+   }
+   size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, 
curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+   }
+   break;
default:
break;
}
@@ -1661,11 +1712,6 @@ static inline void navi10_dump_od_table(OverDriveTable_t 
*od_table) {
pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
 }
 
-static inline bool navi10_od_feature_is_supported(struct 
smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
-{
-   return od_table->cap[feature];
-}
-
 static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table 
*od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
 {
if (value < od_table->min[setting]) {
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH v2 1/3] drm/amdgpu/navi10: implement sclk/mclk OD via pp_od_clk_voltage

2019-11-11 Thread Matt Coffin
[Why]
Before this patch, there was no way to use pp_od_clk_voltage on navi

[How]
Similar to the vega20 implementation, but using the common smc_v11_0
headers, implemented the pp_od_clk_voltage API for navi10's pptable
implementation

Signed-off-by: Matt Coffin 
---
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |   2 +
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c| 180 ++
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c |  27 +++
 3 files changed, 209 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index fd6ec9033d06..154b57a4dbbb 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -250,4 +250,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct 
smu_context *smu, enum smu_clk_
 
 int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
 
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool 
initialize, size_t overdrive_table_size);
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 354f70978f82..354f63103308 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -1649,10 +1649,188 @@ static int navi10_update_pcie_parameters(struct 
smu_context *smu,
  SMU_MSG_OverridePcieParameters,
  smu_pcie_arg);
}
+   return ret;
+}
+
+static inline void navi10_dump_od_table(OverDriveTable_t *od_table) {
+   pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, 
od_table->GfxclkFmax);
+   pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, 
od_table->GfxclkVolt1);
+   pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, 
od_table->GfxclkVolt2);
+   pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, 
od_table->GfxclkVolt3);
+   pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
+   pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
+}
+
+static inline bool navi10_od_feature_is_supported(struct 
smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+{
+   return od_table->cap[feature];
+}
+
+static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table 
*od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
+{
+   if (value < od_table->min[setting]) {
+   pr_warn("OD setting (%d, %d) is less than the minimum allowed 
(%d)\n", setting, value, od_table->min[setting]);
+   return -EINVAL;
+   }
+   if (value > od_table->max[setting]) {
+   pr_warn("OD setting (%d, %d) is greater than the maximum 
allowed (%d)\n", setting, value, od_table->max[setting]);
+   return -EINVAL;
+   }
+   return 0;
+}
+
+static int navi10_setup_od_limits(struct smu_context *smu) {
+   struct smu_11_0_overdrive_table *overdrive_table = NULL;
+   struct smu_11_0_powerplay_table *powerplay_table = NULL;
+
+   if (!smu->smu_table.power_play_table) {
+   pr_err("powerplay table uninitialized!\n");
+   return -ENOENT;
+   }
+   powerplay_table = (struct smu_11_0_powerplay_table 
*)smu->smu_table.power_play_table;
+   overdrive_table = &powerplay_table->overdrive_table;
+   if (!smu->od_settings) {
+   smu->od_settings = kmemdup(overdrive_table, sizeof(struct 
smu_11_0_overdrive_table), GFP_KERNEL);
+   } else {
+   memcpy(smu->od_settings, overdrive_table, sizeof(struct 
smu_11_0_overdrive_table));
+   }
+   return 0;
+}
+
+static int navi10_set_default_od_settings(struct smu_context *smu, bool 
initialize) {
+   OverDriveTable_t *od_table;
+   int ret = 0;
+
+   ret = smu_v11_0_set_default_od_settings(smu, initialize, 
sizeof(OverDriveTable_t));
+   if (ret)
+   return ret;
+
+   if (initialize) {
+   ret = navi10_setup_od_limits(smu);
+   if (ret) {
+   pr_err("Failed to retrieve board OD limits\n");
+   return ret;
+   }
+
+   }
+
+   od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+   if (od_table) {
+   navi10_dump_od_table(od_table);
+   }
 
return ret;
 }
 
+static int navi10_od_edit_dpm_table(struct smu_context *smu, enum 
PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
+   int i;
+   int ret = 0;
+   struct smu_table_context *table_context = &smu->smu_table;
+   OverDriveTable_t *od_table;
+   struct smu_11_0_overdrive_table *od_settings;
+   od_table = (OverDriveTable_t *)table_context->overdrive_table;
+
+   if (!smu->od_enabled) {
+   pr_warn("OverDrive is not enabled!\n");
+   return -EINVAL;
+   }
+
+   if (!smu->od_settings) {
+   pr_err("OD board limits are not set!\n");

[PATCH v2 2/3] drm/amdgpu/navi10: implement GFXCLK_CURVE overdrive

2019-11-11 Thread Matt Coffin
[Why]
Before this patch, there was no way to set the gfxclk voltage curve in
the overdrive settings for navi10 through pp_od_clk_voltage

[How]
Add the required implementation to navi10's ppt dpm table editing
implementation, similar to the vega20 implementation and interface.

Signed-off-by: Matt Coffin 
---
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 60 +-
 drivers/gpu/drm/amd/powerplay/navi10_ppt.h |  2 +
 2 files changed, 60 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 354f63103308..3e26c03d5eed 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -1728,6 +1728,8 @@ static int navi10_od_edit_dpm_table(struct smu_context 
*smu, enum PP_OD_DPM_TABL
struct smu_table_context *table_context = &smu->smu_table;
OverDriveTable_t *od_table;
struct smu_11_0_overdrive_table *od_settings;
+   enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
+   uint16_t *freq_ptr, *voltage_ptr;
od_table = (OverDriveTable_t *)table_context->overdrive_table;
 
if (!smu->od_enabled) {
@@ -1824,8 +1826,62 @@ static int navi10_od_edit_dpm_table(struct smu_context 
*smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_VDDC_CURVE:
-   // TODO: implement
-   return -ENOSYS;
+   if (!navi10_od_feature_is_supported(od_settings, 
SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+   pr_warn("GFXCLK_CURVE not supported!\n");
+   return -ENOTSUPP;
+   }
+   if (size < 3) {
+   pr_info("invalid number of parameters: %d\n", size);
+   return -EINVAL;
+   }
+   if (!od_table) {
+   pr_info("Overdrive is not initialized\n");
+   return -EINVAL;
+   }
+
+   switch (input[0]) {
+   case 0:
+   freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
+   voltage_setting = 
SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
+   freq_ptr = &od_table->GfxclkFreq1;
+   voltage_ptr = &od_table->GfxclkVolt1;
+   break;
+   case 1:
+   freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
+   voltage_setting = 
SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
+   freq_ptr = &od_table->GfxclkFreq2;
+   voltage_ptr = &od_table->GfxclkVolt2;
+   break;
+   case 2:
+   freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
+   voltage_setting = 
SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
+   freq_ptr = &od_table->GfxclkFreq3;
+   voltage_ptr = &od_table->GfxclkVolt3;
+   break;
+   default:
+   pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
+   pr_info("Supported indices: [0, 1, 2]\n");
+   return -EINVAL;
+   }
+   ret = navi10_od_setting_check_range(od_settings, freq_setting, 
input[1]);
+   if (ret)
+   return ret;
+   // Allow setting zero to disable the OverDrive VDDC curve
+   if (input[2] != 0) {
+   ret = navi10_od_setting_check_range(od_settings, 
voltage_setting, input[2]);
+   if (ret)
+   return ret;
+   *freq_ptr = input[1];
+   *voltage_ptr = ((uint16_t)input[2]) * 
NAVI10_VOLTAGE_SCALE;
+   pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], 
*freq_ptr, *voltage_ptr);
+   } else {
+   // If setting 0, disable all voltage curve settings
+   od_table->GfxclkVolt1 = 0;
+   od_table->GfxclkVolt2 = 0;
+   od_table->GfxclkVolt3 = 0;
+   }
+   navi10_dump_od_table(od_table);
+   break;
default:
return -ENOSYS;
}
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h 
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index a37e37c5f105..fd6dda1a67a1 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -33,6 +33,8 @@
 #define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
 #define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK  (1448)
 
+#define NAVI10_VOLTAGE_SCALE (4)
+
 extern void navi10_set_ppt_funcs(struct smu_context *smu);
 
 #endif
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-

Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

2019-11-11 Thread Andrey Grodzovsky
Note that kthread_park waits for kthread->parked to be signaled before 
proceeding - so in the scenario you described it means the main thread is 
running (not parked and so kthread->parked is not signaled) and so 
kthread_park will not proceed until the sched thread finish current loop 
(including removing any signaled jobs from ring_mirror_list) and is back 
to 
wait_event_interruptible->drm_sched_blocked->kthread_parkme->complete(&self->parked) 
to park itself - so looks to me it should be OK.


Andrey


On 11/11/19 2:19 AM, Deng, Emily wrote:

Hi Andrey,
 I don’t think your patch will help for this. As it may call 
kthread_should_park in drm_sched_cleanup_jobs first, and then call 
kcl_kthread_park. And then it still has a race between the 2 threads.

Best wishes
Emily Deng




-Original Message-
From: Grodzovsky, Andrey 
Sent: Saturday, November 9, 2019 3:01 AM
To: Koenig, Christian ; Deng, Emily
; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr


On 11/8/19 5:35 AM, Koenig, Christian wrote:

Hi Emily,

exactly that can't happen. See here:


      /* Don't destroy jobs while the timeout worker is running */
      if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
      !cancel_delayed_work(&sched->work_tdr))
      return NULL;

We never free jobs while the timeout working is running to prevent
exactly that issue.


I don't think this protects us if drm_sched_cleanup_jobs is called for scheduler
which didn't experience a timeout, in amdgpu_device_gpu_recover we access
sched->ring_mirror_list for all the schedulers on a device so this condition
above won't protect us. What in fact could help maybe is my recent patch
541c521 drm/sched: Avoid job cleanup if sched thread is parked. because we
do park each of the scheduler threads during tdr job before trying to access
sched->ring_mirror_list.

Emily - did you see this problem with that patch in place ? I only pushed it
yesterday.

Andrey



Regards,
Christian.

Am 08.11.19 um 11:32 schrieb Deng, Emily:

Hi Christian,
The drm_sched_job_timedout-> amdgpu_job_timedout call

amdgpu_device_gpu_recover. I mean the main scheduler free the jobs while
in amdgpu_device_gpu_recover, and before calling drm_sched_stop.

Best wishes
Emily Deng




-Original Message-
From: Koenig, Christian 
Sent: Friday, November 8, 2019 6:26 PM
To: Deng, Emily ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

Hi Emily,

well who is calling amdgpu_device_gpu_recover() in this case?

When it's not the scheduler we shouldn't have a guilty job in the first place.

Regards,
Christian.

Am 08.11.19 um 11:22 schrieb Deng, Emily:

Hi Christian,
 No, I am on the new branch and it also has the patch. Even
if they are freed by

the main scheduler, how could we avoid the main scheduler freeing jobs while
entering the function amdgpu_device_gpu_recover?

Best wishes
Emily Deng




-Original Message-
From: Koenig, Christian 
Sent: Friday, November 8, 2019 6:15 PM
To: Deng, Emily ;
amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for
tdr

Hi Emily,

in this case you are on an old code branch.

Jobs are freed now by the main scheduler thread and only if no
timeout handler is running.

See this patch here:

commit 5918045c4ed492fb5813f980dcf89a90fefd0a4e
Author: Christian König 
Date:   Thu Apr 18 11:00:21 2019 -0400

    drm/scheduler: rework job destruction

Regards,
Christian.

Am 08.11.19 um 11:11 schrieb Deng, Emily:

Hi Christian,
  Please refer to the following log. When it enters the
amdgpu_device_gpu_recover

function, the bad job 5086879e is being freed in function
amdgpu_job_free_cb  at the same time, because of the hardware
fence

signal.

But amdgpu_device_gpu_recover goes faster, at this case, the
s_fence is already freed, but job is not freed in time. Then this issue

occurs.

[  449.792189] [drm:amdgpu_job_timedout [amdgpu]] *ERROR* ring

sdma0

timeout, signaled seq=2481, emitted seq=2483 [  449.793202]
[drm:amdgpu_job_timedout [amdgpu]] *ERROR* Process information:

process  pid 0 thread  pid 0, s_job:5086879e [
449.794163] amdgpu
:00:08.0: GPU reset begin!

[  449.794175] Emily:amdgpu_job_free_cb,Process information:
process pid 0 thread  pid 0, s_job:5086879e [
449.794221] Emily:amdgpu_job_free_cb,Process information: process
pid 0 thread pid 0, s_job:66eb74ab [  449.794222]
Emily:amdgpu_job_free_cb,Process information: process  pid 0
thread pid 0, s_job:d4438ad9 [  449.794255]
Emily:amdgpu_job_free_cb,Process information: process  pid 0
thread pid 0, s_job:b6d69c65 [  449.794257]
Emily:amdgpu_job_free_cb,Process information: process  pid 0
thread pid 0,

s_job:ea85e922 [  449.794287]
Emily:amdgpu_job_free_cb,Process
information: process  pid 0 thread  pid 0, s_job:ed3a5ac6
[ 449.794366] BUG: unable to handle kernel NULL pointer

Re: [PATCH 1/2] drm/amdkfd: Use better name to indicate the offset is in dwords

2019-11-11 Thread Yong Zhao

ping

On 2019-11-01 4:10 p.m., Zhao, Yong wrote:

Change-Id: I75da23bba90231762cf58da3170f5bb77ece45ed
Signed-off-by: Yong Zhao 
---
  .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  |  2 +-
  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c  | 14 +++---
  drivers/gpu/drm/amd/amdkfd/kfd_priv.h  |  8 
  3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 984c2f2b24b6..4503fb26fe5b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -170,7 +170,7 @@ static int allocate_doorbell(struct qcm_process_device 
*qpd, struct queue *q)
}
  
  	q->properties.doorbell_off =

-   kfd_doorbell_id_to_offset(dev, q->process,
+   kfd_get_doorbell_dw_offset_from_bar(dev, q->process,
  q->doorbell_id);
  
  	return 0;

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index ebe79bf00145..f904355c44a1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -91,7 +91,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
doorbell_start_offset;
  
-	kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);

+   kfd->doorbell_base_dw_offset = doorbell_start_offset / sizeof(u32);
  
  	kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,

   kfd_doorbell_process_slice(kfd));
@@ -103,8 +103,8 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
pr_debug("doorbell base   == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
  
-	pr_debug("doorbell_id_offset  == 0x%08lX\n",

-   kfd->doorbell_id_offset);
+   pr_debug("doorbell_base_dw_offset  == 0x%08lX\n",
+   kfd->doorbell_base_dw_offset);
  
  	pr_debug("doorbell_process_limit  == 0x%08lX\n",

doorbell_process_limit);
@@ -185,7 +185,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 * Calculating the kernel doorbell offset using the first
 * doorbell page.
 */
-   *doorbell_off = kfd->doorbell_id_offset + inx;
+   *doorbell_off = kfd->doorbell_base_dw_offset + inx;
  
  	pr_debug("Get kernel queue doorbell\n"

" doorbell offset   == 0x%08X\n"
@@ -225,17 +225,17 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
}
  }
  
-unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,

+unsigned int kfd_get_doorbell_dw_offset_from_bar(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int doorbell_id)
  {
/*
-* doorbell_id_offset accounts for doorbells taken by KGD.
+* doorbell_base_dw_offset accounts for doorbells taken by KGD.
 * index * kfd_doorbell_process_slice/sizeof(u32) adjusts to
 * the process's doorbells. The offset returned is in dword
 * units regardless of the ASIC-dependent doorbell size.
 */
-   return kfd->doorbell_id_offset +
+   return kfd->doorbell_base_dw_offset +
process->doorbell_index
* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 62db4d20ed32..7c561c98f2e2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -238,9 +238,9 @@ struct kfd_dev {
 * KFD. It is aligned for mapping
 * into user mode
 */
-   size_t doorbell_id_offset;  /* Doorbell offset (from KFD doorbell
-* to HW doorbell, GFX reserved some
-* at the start)
+   size_t doorbell_base_dw_offset; /* Doorbell dword offset (from KFD
+* doorbell to PCI doorbell bar,
+* GFX reserved some at the start)
 */
u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
   * page used by kernel queue
@@ -821,7 +821,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 
__iomem *db_addr);
  u32 read_kernel_doorbell(u32 __iomem *db);
  void write_kernel_doorbell(void __iomem *db, u32 value);
  void write_kernel_doorbell64(void __iomem *db, u64 value);
-unsigned int kfd_doorbell

Re: [PATCH] drm/amdkfd: Rename create_cp_queue() to init_user_queue()

2019-11-11 Thread Yong Zhao

ping

On 2019-11-01 4:12 p.m., Zhao, Yong wrote:

create_cp_queue() could also work with SDMA queues, so we should rename
it.

Change-Id: I76cbaed8fa95dd9062d786cbc1dd037ff041da9d
Signed-off-by: Yong Zhao 
---
  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 6 +++---
  1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 48185d2957e9..ebb2f69b438c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -162,7 +162,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
pqm->queue_slot_bitmap = NULL;
  }
  
-static int create_cp_queue(struct process_queue_manager *pqm,

+static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev, struct queue **q,
struct queue_properties *q_properties,
struct file *f, unsigned int qid)
@@ -251,7 +251,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
  
-		retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);

+   retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -272,7 +272,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
  
-		retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);

+   retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH -next] drm/amd/display: remove set but not used variable 'bpc'

2019-11-11 Thread Alex Deucher
Applied.  Thanks!

Alex

On Sun, Nov 10, 2019 at 9:30 PM YueHaibing  wrote:
>
> Fixes gcc '-Wunused-but-set-variable' warning:
>
> drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c: In function 
> get_pbn_from_timing:
> drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c:2364:11: warning:
>  variable bpc set but not used [-Wunused-but-set-variable]
>
> It is not used since commit e49f69363adf ("drm/amd/display: use
> proper formula to calculate bandwidth from timing")
>
> Signed-off-by: YueHaibing 
> ---
>  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 --
>  1 file changed, 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
> b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> index bdc8be3..53394e2 100644
> --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
> @@ -2653,13 +2653,11 @@ static int get_color_depth(enum dc_color_depth 
> color_depth)
>
>  static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
>  {
> -   uint32_t bpc;
> uint64_t kbps;
> struct fixed31_32 peak_kbps;
> uint32_t numerator;
> uint32_t denominator;
>
> -   bpc = 
> get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
> kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
>
> /*
> --
> 2.7.4
>
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH -next] drm/amd/display: remove set but not used variable 'ds_port'

2019-11-11 Thread Alex Deucher
Applied.  thanks!

Alex

On Sun, Nov 10, 2019 at 9:29 PM YueHaibing  wrote:
>
> Fixes gcc '-Wunused-but-set-variable' warning:
>
> drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c: In function 
> dp_wa_power_up_0010FA:
> drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c:2320:35: warning:
>  variable ds_port set but not used [-Wunused-but-set-variable]
>
> It is never used, so can be removed.
>
> Signed-off-by: YueHaibing 
> ---
>  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 
>  1 file changed, 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
> b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
> index 65de32f..b814b74 100644
> --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
> +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
> @@ -2910,7 +2910,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, 
> uint8_t *dpcd_data,
> int length)
>  {
> int retry = 0;
> -   union dp_downstream_port_present ds_port = { 0 };
>
> if (!link->dpcd_caps.dpcd_rev.raw) {
> do {
> @@ -2923,9 +2922,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, 
> uint8_t *dpcd_data,
> } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
> }
>
> -   ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
> -DP_DPCD_REV];
> -
> if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
> switch (link->dpcd_caps.branch_dev_id) {
> /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power 
> down
> --
> 2.7.4
>
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/powerplay: remove set but not used variable 'threshold', 'state'

2019-11-11 Thread Alex Deucher
Applied.  Thanks!

Alex

On Mon, Nov 11, 2019 at 3:07 AM zhengbin  wrote:
>
> Fixes gcc '-Wunused-but-set-variable' warning:
>
> drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c: In function 
> fiji_populate_single_graphic_level:
> drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c:943:11: warning: variable 
> threshold set but not used [-Wunused-but-set-variable]
> drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c: In function 
> fiji_populate_memory_timing_parameters:
> drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c:1504:8: warning: variable 
> state set but not used [-Wunused-but-set-variable]
>
> They are introduced by commit 2e112b4ae3ba ("drm/amd/pp:
> remove fiji_smc/smumgr split."), but never used,
> so remove it.
>
> Reported-by: Hulk Robot 
> Signed-off-by: zhengbin 
> ---
>  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 7 ++-
>  1 file changed, 2 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c 
> b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
> index da025b1..32ebb38 100644
> --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
> @@ -940,7 +940,7 @@ static int fiji_populate_single_graphic_level(struct 
> pp_hwmgr *hwmgr,
>  {
> int result;
> /* PP_Clocks minClocks; */
> -   uint32_t threshold, mvdd;
> +   uint32_t mvdd;
> struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
> struct phm_ppt_v1_information *table_info =
> (struct phm_ppt_v1_information *)(hwmgr->pptable);
> @@ -973,8 +973,6 @@ static int fiji_populate_single_graphic_level(struct 
> pp_hwmgr *hwmgr,
> level->VoltageDownHyst = 0;
> level->PowerThrottle = 0;
>
> -   threshold = clock * data->fast_watermark_threshold / 100;
> -
> data->display_timing.min_clock_in_sr = 
> hwmgr->display_config->min_core_set_clock_in_sr;
>
> if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 
> PHM_PlatformCaps_SclkDeepSleep))
> @@ -1501,7 +1499,7 @@ static int 
> fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
> uint32_t dram_timing;
> uint32_t dram_timing2;
> uint32_t burstTime;
> -   ULONG state, trrds, trrdl;
> +   ULONG trrds, trrdl;
> int result;
>
> result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
> @@ -1513,7 +1511,6 @@ static int 
> fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
> dram_timing2 = cgs_read_register(hwmgr->device, 
> mmMC_ARB_DRAM_TIMING2);
> burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
>
> -   state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
> trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
> trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
>
> --
> 2.7.4
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/display: Use static const, not const static

2019-11-11 Thread Alex Deucher
Applied.  Thanks!

Alex

On Mon, Nov 11, 2019 at 9:11 AM zhengbin  wrote:
>
> Move the static keyword to the front of declarations.
>
> Reported-by: Hulk Robot 
> Signed-off-by: zhengbin 
> ---
>  drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
> b/drivers/gpu/drm/amd/display/dc/core/dc.c
> index 1fdba13..0d8c663 100644
> --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
> +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
> @@ -69,7 +69,7 @@
>  #define DC_LOGGER \
> dc->ctx->logger
>
> -const static char DC_BUILD_ID[] = "production-build";
> +static const char DC_BUILD_ID[] = "production-build";
>
>  /**
>   * DOC: Overview
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 0/2] remove some set but not used variables in hwmgr

2019-11-11 Thread Alex Deucher
Applied.  Thanks!

Alex

On Sun, Nov 10, 2019 at 11:04 PM Quan, Evan  wrote:
>
> Series is reviewed-by: Evan Quan 
>
> > -Original Message-
> > From: zhengbin 
> > Sent: Monday, November 11, 2019 11:46 AM
> > To: rex@amd.com; Quan, Evan ; Deucher,
> > Alexander ; Koenig, Christian
> > ; Zhou, David(ChunMing)
> > ; airl...@linux.ie; dan...@ffwll.ch; amd-
> > g...@lists.freedesktop.org; dri-de...@lists.freedesktop.org
> > Cc: zhengbi...@huawei.com
> > Subject: [PATCH 0/2] remove some set but not used variables in hwmgr
> >
> > zhengbin (2):
> >   drm/amd/powerplay: remove set but not used variable
> > 'vbios_version','data'
> >   drm/amd/powerplay: remove set but not used variable 'data'
> >
> >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   | 4 
> >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 --
> >  2 files changed, 6 deletions(-)
> >
> > --
> > 2.7.4
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/amd/display: Fix unsigned variable compared to less than zero

2019-11-11 Thread Gustavo A. R. Silva
Currently, the error check below on variable *vcpi_slots* is always
false because it is a uint64_t type variable, hence, the values
this variable can hold are never less than zero:

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c:
4870 if (dm_new_connector_state->vcpi_slots < 0) {
4871 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", 
(int)dm_new_connector_stat e->vcpi_slots);
4872 return dm_new_connector_state->vcpi_slots;
4873 }

Fix this by making *vcpi_slots* of int type.

Addresses-Coverity: 1487838 ("Unsigned compared against 0")
Fixes: b4c578f08378 ("drm/amd/display: Add MST atomic routines")
Signed-off-by: Gustavo A. R. Silva 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6db07e9e33ab..a8fc90a927d6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -403,7 +403,7 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
uint8_t abm_level;
-   uint64_t vcpi_slots;
+   int vcpi_slots;
uint64_t pbn;
 };
 
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/display: Fix unsigned variable compared to less than zero

2019-11-11 Thread Mikita Lipski


Thanks for catching it!

Reviewed-by: Mikita Lipski 


On 11.11.2019 12:25, Gustavo A. R. Silva wrote:

Currently, the error check below on variable *vcpi_slots* is always
false because it is a uint64_t type variable, hence, the values
this variable can hold are never less than zero:

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c:
4870 if (dm_new_connector_state->vcpi_slots < 0) {
4871 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", 
(int)dm_new_connector_stat e->vcpi_slots);
4872 return dm_new_connector_state->vcpi_slots;
4873 }

Fix this by making *vcpi_slots* of int type.

Addresses-Coverity: 1487838 ("Unsigned compared against 0")
Fixes: b4c578f08378 ("drm/amd/display: Add MST atomic routines")
Signed-off-by: Gustavo A. R. Silva
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6db07e9e33ab..a8fc90a927d6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -403,7 +403,7 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
uint8_t abm_level;
-   uint64_t vcpi_slots;
+   int vcpi_slots;
uint64_t pbn;
  };
  
-- 2.23.0


--
Thanks,
Mikita Lipski
Software Engineer, AMD
mikita.lip...@amd.com
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amdgpu/smu_v11: Unify and fix power limits

2019-11-11 Thread Alex Deucher
Patch is missing your signed-off-by.  Please address Evan's comments
and the signed off by and I'll apply it.

Thanks!

Alex

On Sun, Nov 10, 2019 at 11:03 PM Quan, Evan  wrote:
>
> If smu_get_pptable_power_limit() is designed to be used internally, the 
> second argument "lock_needed" can be dropped.
> Except that, the patch is reviewed-by: Evan Quan 
>
> > -Original Message-
> > From: amd-gfx  On Behalf Of Matt
> > Coffin
> > Sent: Saturday, November 9, 2019 7:54 AM
> > To: amd-gfx@lists.freedesktop.org
> > Cc: Matt Coffin 
> > Subject: [PATCH] drm/amdgpu/smu_v11: Unify and fix power limits
> >
> > [Why]
> > On Navi10, and presumably arcterus, updating pp_table via sysfs would
> > not re-scale the maximum possible power limit one can set. On navi10,
> > the SMU code ignored the power percentage overdrive setting entirely,
> > and would not allow you to exceed the default power limit at all.
> >
> > [How]
> > Adding a function to the SMU interface to get the pptable version of the
> > default power limit allows ASIC-specific code to provide the correct
> > maximum-settable power limit for the current pptable.
> > ---
> >  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 18 -
> >  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 22 +-
> >  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  4 +-
> >  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 +
> >  .../drm/amd/powerplay/inc/smu_v11_0_pptable.h |  2 +
> >  drivers/gpu/drm/amd/powerplay/navi10_ppt.c| 22 +-
> >  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 40 +--
> >  drivers/gpu/drm/amd/powerplay/vega20_ppt.c|  1 -
> >  8 files changed, 83 insertions(+), 28 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > index 66faea66a8e9..6bf940f1edfb 100644
> > --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > @@ -1107,7 +1107,7 @@ static int smu_smc_table_hw_init(struct
> > smu_context *smu,
> >   if (ret)
> >   return ret;
> >
> > - ret = smu_get_power_limit(smu, &smu->default_power_limit,
> > true, false);
> > + ret = smu_get_power_limit(smu, &smu->default_power_limit,
> > false, false);
> >   if (ret)
> >   return ret;
> >   }
> > @@ -2509,3 +2509,19 @@ int smu_get_dpm_clock_table(struct smu_context
> > *smu,
> >
> >   return ret;
> >  }
> > +
> > +uint32_t smu_get_pptable_power_limit(struct smu_context *smu, bool
> > lock_needed)
> > +{
> > + uint32_t ret = 0;
> > +
> > + if (lock_needed)
> > + mutex_lock(&smu->mutex);
> > +
> > + if (smu->ppt_funcs->get_pptable_power_limit)
> > + ret = smu->ppt_funcs->get_pptable_power_limit(smu);
> > +
> > + if (lock_needed)
> > + mutex_unlock(&smu->mutex);
> > +
> > + return ret;
> > +}
> > diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > index 3099ac256bd3..ebb9c8064867 100644
> > --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > @@ -1261,15 +1261,14 @@ arcturus_get_profiling_clk_mask(struct
> > smu_context *smu,
> >
> >  static int arcturus_get_power_limit(struct smu_context *smu,
> >uint32_t *limit,
> > -  bool asic_default)
> > +  bool cap)
> >  {
> >   PPTable_t *pptable = smu->smu_table.driver_pptable;
> >   uint32_t asic_default_power_limit = 0;
> >   int ret = 0;
> >   int power_src;
> >
> > - if (!smu->default_power_limit ||
> > - !smu->power_limit) {
> > + if (!smu->power_limit) {
> >   if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
> >   power_src = smu_power_get_index(smu,
> > SMU_POWER_SOURCE_AC);
> >   if (power_src < 0)
> > @@ -1292,17 +1291,11 @@ static int arcturus_get_power_limit(struct
> > smu_context *smu,
> >   pptable-
> > >SocketPowerLimitAc[PPT_THROTTLER_PPT0];
> >   }
> >
> > - if (smu->od_enabled) {
> > - asic_default_power_limit *= (100 + smu-
> > >smu_table.TDPODLimit);
> > - asic_default_power_limit /= 100;
> > - }
> > -
> > - smu->default_power_limit = asic_default_power_limit;
> >   smu->power_limit = asic_default_power_limit;
> >   }
> >
> > - if (asic_default)
> > - *limit = smu->default_power_limit;
> > + if (cap)
> > + *limit = smu_v11_0_get_max_power_limit(smu);
> >   else
> >   *limit = smu->power_limit;
> >
> > @@ -2070,6 +2063,12 @@ static void arcturus_i2c_eeprom_control_fini(struct
> > i2c_adapter *control)
> >   i2c_del_adapter(control);
> >  }
> >
> >

Re: [PATCH v2 0/3] navi10: Implement overdrive pp_od_clk_voltage

2019-11-11 Thread Alex Deucher
Patches are missing your Signed-off-by.  Please follow up with that
and I'll apply them.

Thanks!

Alex

On Fri, Nov 8, 2019 at 4:28 PM Matt Coffin  wrote:
>
> [Why]
> Before this patchset, navi10 users could not utilize the overdrive
> functionality. This prevented them from overclocking, overvolting, or
> undervolting their cards.
>
> [How]
> Similar to the vega20 implementation, add the pp_od_clk_voltage
> interface to the navi10 powerplay table.
>
> [Possible Alternatives]
> This could also be done more generically in smu_v11_0 code, but would
> require more significant changes to the vega20 code, and feature-gating
> based on chip capabilities in the smu to disable overdrive for arcterus.
> I chose this path so as not to completely refactor the vega20 side of
> things, and introduce SMU code which would not play nicely with new
> ASICs.
>
> v2: rebase off latest code, and remove an incorrect bounds check
>
> Matt Coffin (3):
>   drm/amdgpu/navi10: implement sclk/mclk OD via pp_od_clk_voltage
>   drm/amdgpu/navi10: implement GFXCLK_CURVE overdrive
>   drm/amdgpu/navi10: Implement od clk printing
>
>  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |   2 +
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.c| 282 ++
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.h|   2 +
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c |  27 ++
>  4 files changed, 313 insertions(+)
>
> --
> 2.23.0
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/display: remove duplicated comparison expression

2019-11-11 Thread Alex Deucher
Applied.  Thanks!

Alex

On Mon, Nov 11, 2019 at 8:38 AM Kazlauskas, Nicholas
 wrote:
>
> On 2019-11-09 10:49 a.m., Colin King wrote:
> > From: Colin Ian King 
> >
> > There is comparison expression that is duplicated and hence one
> > of the expressions can be removed.  Remove it.
> >
> > Addresses-Coverity: ("Same on both sides")
> > Fixes: 12e2b2d4c65f ("drm/amd/display: add dcc programming for dual plane")
> > Signed-off-by: Colin Ian King 
>
> Reviewed-by: Nicholas Kazlauskas 
>
> Thanks!
>
> Nicholas Kazlauskas
>
> > ---
> >   drivers/gpu/drm/amd/display/dc/core/dc.c | 1 -
> >   1 file changed, 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
> > b/drivers/gpu/drm/amd/display/dc/core/dc.c
> > index 1fdba13b3d0f..1fa255e077d0 100644
> > --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
> > +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
> > @@ -1491,7 +1491,6 @@ static enum surface_update_type 
> > get_plane_info_update_type(const struct dc_surfa
> >   }
> >
> >   if (u->plane_info->plane_size.surface_pitch != 
> > u->surface->plane_size.surface_pitch
> > - || u->plane_info->plane_size.surface_pitch != 
> > u->surface->plane_size.surface_pitch
> >   || u->plane_info->plane_size.chroma_pitch != 
> > u->surface->plane_size.chroma_pitch) {
> >   update_flags->bits.plane_size_change = 1;
> >   elevate_update_type(&update_type, UPDATE_TYPE_MED);
> >
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/powerplay: avoid DPM reenable process on Navi1x ASICs

2019-11-11 Thread Alex Deucher
On Mon, Nov 11, 2019 at 4:25 AM Evan Quan  wrote:
>
> Otherwise, without RLC reinitialization, the DPM reenablement
> will fail. That affects the custom pptable uploading.
>
> Change-Id: I6fe2ed5ce23f2a5b66f371c0b6d1f924837e5af6
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 32 +++
>  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  1 +
>  2 files changed, 26 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 76a4154b3be2..a4d67b30fd72 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1293,10 +1293,25 @@ static int smu_hw_fini(void *handle)
> return ret;
> }
>
> -   ret = smu_stop_dpms(smu);
> -   if (ret) {
> -   pr_warn("Fail to stop Dpms!\n");
> -   return ret;
> +   /*
> +* For custom pptable uploading, skip the DPM features
> +* disable process on Navi1x ASICs.
> +*   - As the gfx related features are under control of
> +* RLC on those ASICs. RLC reinitialization will be
> +* needed to reenable them. That will cost much more
> +* efforts.
> +*
> +*   - SMU firmware can handle the DPM reenablement
> +* properly.
> +*/
> +   if (!smu->uploading_custom_pp_table ||
> +   !((adev->asic_type >= CHIP_NAVI10) &&
> + (adev->asic_type <= CHIP_NAVI12))) {
> +   ret = smu_stop_dpms(smu);
> +   if (ret) {
> +   pr_warn("Fail to stop Dpms!\n");
> +   return ret;
> +   }
> }
>
> kfree(table_context->driver_pptable);
> @@ -1324,13 +1339,16 @@ int smu_reset(struct smu_context *smu)
> struct amdgpu_device *adev = smu->adev;
> int ret = 0;
>
> +   smu->uploading_custom_pp_table = true;
> +

Do we need to differentiate between reloading for a pptable update and
reloading for a gpu reset or suspend/resume or is that already
handled?
Shouldn't we be setting/clearing uploading_custom_pp_table in
smu_sys_set_pp_table() around the call to smu_reset()?

Alex

> ret = smu_hw_fini(adev);
> if (ret)
> -   return ret;
> +   goto out;
>
> ret = smu_hw_init(adev);
> -   if (ret)
> -   return ret;
> +
> +out:
> +   smu->uploading_custom_pp_table = false;
>
> return ret;
>  }
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 8120e7587585..215841f5fb93 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -391,6 +391,7 @@ struct smu_context
>
> uint32_t smc_if_version;
>
> +   bool uploading_custom_pp_table;
>  };
>
>  struct i2c_adapter;
> --
> 2.24.0
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH][next] drm/amd/display: fix spelling mistake "exeuction" -> "execution"

2019-11-11 Thread Alex Deucher
Applied.  Thanks!

Alex

On Mon, Nov 11, 2019 at 8:37 AM Kazlauskas, Nicholas
 wrote:
>
> On 2019-11-09 2:49 p.m., Colin King wrote:
> > From: Colin Ian King 
> >
> > There are spelling mistakes in a DC_ERROR message and a comment.
> > Fix these.
> >
> > Signed-off-by: Colin Ian King 
>
> Reviewed-by: Nicholas Kazlauskas 
>
> Thanks!
>
> Nicholas Kazlauskas
>
> > ---
> >   drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c| 2 +-
> >   drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 2 +-
> >   2 files changed, 2 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c 
> > b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
> > index 61cefe0a3790..b65b66025267 100644
> > --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
> > +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
> > @@ -92,7 +92,7 @@ void dc_dmub_srv_cmd_execute(struct dc_dmub_srv 
> > *dc_dmub_srv)
> >
> >   status = dmub_srv_cmd_execute(dmub);
> >   if (status != DMUB_STATUS_OK)
> > - DC_ERROR("Error starting DMUB exeuction: status=%d\n", 
> > status);
> > + DC_ERROR("Error starting DMUB execution: status=%d\n", 
> > status);
> >   }
> >
> >   void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
> > diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h 
> > b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
> > index aa8f0396616d..45e427d1952e 100644
> > --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
> > +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
> > @@ -416,7 +416,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv 
> > *dmub,
> >* dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
> >* @dmub: the dmub service
> >*
> > - * Begins exeuction of queued commands on the dmub.
> > + * Begins execution of queued commands on the dmub.
> >*
> >* Return:
> >*   DMUB_STATUS_OK - success
> >
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amdgpu: avoid upload corrupted ta ucode to psp

2019-11-11 Thread Deucher, Alexander
Reviewed-by: Alex Deucher 

From: Hawking Zhang 
Sent: Sunday, November 10, 2019 11:41 PM
To: amd-gfx@lists.freedesktop.org ; Deucher, 
Alexander ; Clements John ; 
Ma, Le 
Cc: Zhang, Hawking 
Subject: [PATCH] drm/amdgpu: avoid upload corrupted ta ucode to psp

xgmi, ras, hdcp and dtm ta are actually separate ucode and
need to be handled case by case to upload to psp.

We support the case that the ta binary has one or multiple of
them built-in. As a result, the driver should check each ta
binary's availability before deciding to upload them to psp.

In the terminate (unload) case, the driver will check the
context readiness before performing unload activity. It's fine
to keep it as is.

Change-Id: I493116970ffb557f33c06de10f786684fdcef85b
Signed-off-by: Hawking Zhang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 22 +-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 456ac04b246c..9621e207a9ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -558,7 +558,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
 struct ta_xgmi_shared_memory *xgmi_cmd;
 int ret;

-   if (!psp->adev->psp.ta_fw)
+   if (!psp->adev->psp.ta_fw ||
+   !psp->adev->psp.ta_xgmi_ucode_size ||
+   !psp->adev->psp.ta_xgmi_start_addr)
 return -ENOENT;

 if (!psp->xgmi_context.initialized) {
@@ -768,6 +770,12 @@ static int psp_ras_initialize(struct psp_context *psp)
 {
 int ret;

+   if (!psp->adev->psp.ta_ras_ucode_size ||
+   !psp->adev->psp.ta_ras_start_addr) {
+   dev_warn(psp->adev->dev, "RAS: ras ta ucode is not 
available\n");
+   return 0;
+   }
+
 if (!psp->ras.ras_initialized) {
 ret = psp_ras_init_shared_buf(psp);
 if (ret)
@@ -857,6 +865,12 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 {
 int ret;

+   if (!psp->adev->psp.ta_hdcp_ucode_size ||
+   !psp->adev->psp.ta_hdcp_start_addr) {
+   dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not 
available\n");
+   return 0;
+   }
+
 if (!psp->hdcp_context.hdcp_initialized) {
 ret = psp_hdcp_init_shared_buf(psp);
 if (ret)
@@ -1030,6 +1044,12 @@ static int psp_dtm_initialize(struct psp_context *psp)
 {
 int ret;

+   if (!psp->adev->psp.ta_dtm_ucode_size ||
+   !psp->adev->psp.ta_dtm_start_addr) {
+   dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not 
available\n");
+   return 0;
+   }
+
 if (!psp->dtm_context.dtm_initialized) {
 ret = psp_dtm_init_shared_buf(psp);
 if (ret)
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 3/4] drm/ttm: rework BO delayed delete.

2019-11-11 Thread Christian König
This patch reworks the whole delayed deletion of BOs which aren't idle.

Instead of having two counters for the BO structure we resurrect the BO
when we find that a deleted BO is not idle yet.

This has many advantages, especially that we don't need to
increment/decrement the BOs reference counter any more when it
moves on the LRUs.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/ttm/ttm_bo.c  | 215 +-
 drivers/gpu/drm/ttm/ttm_bo_util.c |   1 -
 include/drm/ttm/ttm_bo_api.h  |  11 +-
 3 files changed, 97 insertions(+), 130 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1178980f4147..570b0e1089b7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -145,26 +145,6 @@ static inline uint32_t ttm_bo_type_flags(unsigned type)
return 1 << (type);
 }
 
-static void ttm_bo_release_list(struct kref *list_kref)
-{
-   struct ttm_buffer_object *bo =
-   container_of(list_kref, struct ttm_buffer_object, list_kref);
-   size_t acc_size = bo->acc_size;
-
-   BUG_ON(kref_read(&bo->list_kref));
-   BUG_ON(kref_read(&bo->kref));
-   BUG_ON(bo->mem.mm_node != NULL);
-   BUG_ON(!list_empty(&bo->lru));
-   BUG_ON(!list_empty(&bo->ddestroy));
-   ttm_tt_destroy(bo->ttm);
-   atomic_dec(&ttm_bo_glob.bo_count);
-   dma_fence_put(bo->moving);
-   if (!ttm_bo_uses_embedded_gem_object(bo))
-   dma_resv_fini(&bo->base._resv);
-   bo->destroy(bo);
-   ttm_mem_global_free(&ttm_mem_glob, acc_size);
-}
-
 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
  struct ttm_mem_reg *mem)
 {
@@ -181,21 +161,14 @@ static void ttm_bo_add_mem_to_lru(struct 
ttm_buffer_object *bo,
 
man = &bdev->man[mem->mem_type];
list_add_tail(&bo->lru, &man->lru[bo->priority]);
-   kref_get(&bo->list_kref);
 
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
 TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
-   kref_get(&bo->list_kref);
}
 }
 
-static void ttm_bo_ref_bug(struct kref *list_kref)
-{
-   BUG();
-}
-
 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
struct ttm_bo_device *bdev = bo->bdev;
@@ -203,12 +176,10 @@ static void ttm_bo_del_from_lru(struct ttm_buffer_object 
*bo)
 
if (!list_empty(&bo->swap)) {
list_del_init(&bo->swap);
-   kref_put(&bo->list_kref, ttm_bo_ref_bug);
notify = true;
}
if (!list_empty(&bo->lru)) {
list_del_init(&bo->lru);
-   kref_put(&bo->list_kref, ttm_bo_ref_bug);
notify = true;
}
 
@@ -446,74 +417,17 @@ static void ttm_bo_flush_all_fences(struct 
ttm_buffer_object *bo)
dma_fence_enable_sw_signaling(fence);
 
for (i = 0; fobj && i < fobj->shared_count; ++i) {
-   fence = rcu_dereference_protected(fobj->shared[i],
-   dma_resv_held(bo->base.resv));
+   fence = rcu_dereference_protected(fobj->shared[i], true);
 
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
}
 }
 
-static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
-{
-   struct ttm_bo_device *bdev = bo->bdev;
-   int ret;
-
-   ret = ttm_bo_individualize_resv(bo);
-   if (ret) {
-   /* Last resort, if we fail to allocate memory for the
-* fences block for the BO to become idle
-*/
-   dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
-   30 * HZ);
-   spin_lock(&ttm_bo_glob.lru_lock);
-   goto error;
-   }
-
-   spin_lock(&ttm_bo_glob.lru_lock);
-   ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
-   if (!ret) {
-   if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
-   ttm_bo_del_from_lru(bo);
-   spin_unlock(&ttm_bo_glob.lru_lock);
-   if (bo->base.resv != &bo->base._resv)
-   dma_resv_unlock(&bo->base._resv);
-
-   ttm_bo_cleanup_memtype_use(bo);
-   dma_resv_unlock(bo->base.resv);
-   return;
-   }
-
-   ttm_bo_flush_all_fences(bo);
-
-   /*
-* Make NO_EVICT bos immediately available to
-* shrinkers, now that they are queued for
-* destruction.
-*/
-   if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
-   bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
-

[PATCH 4/4] drm/ttm: replace dma_resv object on deleted BOs

2019-11-11 Thread Christian König
When non-imported BOs are resurrected for delayed delete we replace
the dma_resv object to allow for easy reclaiming of the resources.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/ttm/ttm_bo.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 570b0e1089b7..550574ff490f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -599,6 +599,8 @@ static void ttm_bo_release(struct kref *kref)
}
 
spin_lock(&ttm_bo_glob.lru_lock);
+   if (bo->type != ttm_bo_type_sg)
+   bo->base.resv = &bo->base._resv;
kref_init(&bo->kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&ttm_bo_glob.lru_lock);
@@ -725,7 +727,7 @@ static bool ttm_bo_evict_swapout_allowable(struct 
ttm_buffer_object *bo,
 
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
-   if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT || bo->deleted)
+   if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
ret = true;
*locked = false;
if (busy)
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 2/4] drm/ttm: cleanup ttm_buffer_object_transfer

2019-11-11 Thread Christian König
The function is always called with deleted BOs.

While at it cleanup the indentation as well.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/ttm/ttm_bo.c | 12 +++-
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7e7925fecd9e..1178980f4147 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -527,14 +527,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object 
*bo,
   bool interruptible, bool no_wait_gpu,
   bool unlock_resv)
 {
-   struct dma_resv *resv;
+   struct dma_resv *resv = &bo->base._resv;
int ret;
 
-   if (unlikely(list_empty(&bo->ddestroy)))
-   resv = bo->base.resv;
-   else
-   resv = &bo->base._resv;
-
if (dma_resv_test_signaled_rcu(resv, true))
ret = 0;
else
@@ -547,9 +542,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&ttm_bo_glob.lru_lock);
 
-   lret = dma_resv_wait_timeout_rcu(resv, true,
-  interruptible,
-  30 * HZ);
+   lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
+30 * HZ);
 
if (lret < 0)
return lret;
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 1/4] drm/ttm: refine ghost BO resv criteria

2019-11-11 Thread Christian König
Ghost BOs need to stick with the resv object only when the origin is imported.

This is a low hanging fruit to avoid OOM situations on evictions.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/ttm/ttm_bo_util.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c 
b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2b0e5a088da0..86d152472f38 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -511,7 +511,7 @@ static int ttm_buffer_object_transfer(struct 
ttm_buffer_object *bo,
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
-   if (bo->base.resv == &bo->base._resv)
+   if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
 
dma_resv_init(&fbo->base.base._resv);
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: AMDGPU bug

2019-11-11 Thread Kazlauskas, Nicholas

On 2019-11-10 7:00 p.m., Subsentient wrote:
Hi, I've been experiencing a bug on kernels 5.2 and up that apparently 
is uncommon and/or unimportant enough to have both threads mentioning it 
die.


On a Ryzen 3 2200G, the amdgpu driver fails upon lightdm login:

https://bugzilla.redhat.com/show_bug.cgi?id=1752188

I thought I'd bring this to your attention as the others reporting this 
bug have so far been met with silence, and this has trapped me and 
others on the unsupported kernel 5.1.21.


Thanks for your time.



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx



I'm not sure that the backtrace listed is particularly relevant for 
describing what issue is actually occurring, but if I had to take a 
guess based on the ASIC and kernel version I would imagine that it's 
Raven's displayable DCC support that's causing you issues.


I believe you'd need to be running mesa, xf86-video-amdgpu and running 
without a compositor in your desktop manager. Does this describe your setup?


If you can bisect that would help narrow things down further. The bisect 
commit would likely be one that's just changing the amdgpu version 
number if it's DCC (since mesa checks that before deciding to enable 
displayable DCC).


Nicholas Kazlauskas
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH -next] drm/amd/display: Fix old-style declaration

2019-11-11 Thread YueHaibing
Fix a build warning:

drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:75:1:
 warning: 'static' is not at beginning of declaration [-Wold-style-declaration]

Signed-off-by: YueHaibing 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1fdba13..0d8c663 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -69,7 +69,7 @@
 #define DC_LOGGER \
dc->ctx->logger
 
-const static char DC_BUILD_ID[] = "production-build";
+static const char DC_BUILD_ID[] = "production-build";
 
 /**
  * DOC: Overview
-- 
2.7.4


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/amd/display: Use static const, not const static

2019-11-11 Thread zhengbin
Move the static keyword to the front of declarations.

Reported-by: Hulk Robot 
Signed-off-by: zhengbin 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1fdba13..0d8c663 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -69,7 +69,7 @@
 #define DC_LOGGER \
dc->ctx->logger

-const static char DC_BUILD_ID[] = "production-build";
+static const char DC_BUILD_ID[] = "production-build";

 /**
  * DOC: Overview
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH][next] drm/amd/display: fix spelling mistake "exeuction" -> "execution"

2019-11-11 Thread Kazlauskas, Nicholas

On 2019-11-09 2:49 p.m., Colin King wrote:

From: Colin Ian King 

There are spelling mistakes in a DC_ERROR message and a comment.
Fix these.

Signed-off-by: Colin Ian King 


Reviewed-by: Nicholas Kazlauskas 

Thanks!

Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c| 2 +-
  drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 2 +-
  2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c 
b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 61cefe0a3790..b65b66025267 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -92,7 +92,7 @@ void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
  
  	status = dmub_srv_cmd_execute(dmub);

if (status != DMUB_STATUS_OK)
-   DC_ERROR("Error starting DMUB exeuction: status=%d\n", status);
+   DC_ERROR("Error starting DMUB execution: status=%d\n", status);
  }
  
  void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index aa8f0396616d..45e427d1952e 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -416,7 +416,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
   * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
   * @dmub: the dmub service
   *
- * Begins exeuction of queued commands on the dmub.
+ * Begins execution of queued commands on the dmub.
   *
   * Return:
   *   DMUB_STATUS_OK - success



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/display: remove duplicated comparison expression

2019-11-11 Thread Kazlauskas, Nicholas

On 2019-11-09 10:49 a.m., Colin King wrote:

From: Colin Ian King 

There is comparison expression that is duplicated and hence one
of the expressions can be removed.  Remove it.

Addresses-Coverity: ("Same on both sides")
Fixes: 12e2b2d4c65f ("drm/amd/display: add dcc programming for dual plane")
Signed-off-by: Colin Ian King 


Reviewed-by: Nicholas Kazlauskas 

Thanks!

Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/core/dc.c | 1 -
  1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1fdba13b3d0f..1fa255e077d0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1491,7 +1491,6 @@ static enum surface_update_type 
get_plane_info_update_type(const struct dc_surfa
}
  
  	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch

-   || u->plane_info->plane_size.surface_pitch != 
u->surface->plane_size.surface_pitch
|| u->plane_info->plane_size.chroma_pitch != 
u->surface->plane_size.chroma_pitch) {
update_flags->bits.plane_size_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_MED);



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amd/powerplay: do proper cleanups on hw_fini

2019-11-11 Thread Quan, Evan
Just sent out a patch which should be able to address this issue.
https://lists.freedesktop.org/archives/amd-gfx/2019-November/042458.html

Regards,
Evan
> -Original Message-
> From: Matt Coffin 
> Sent: Saturday, November 9, 2019 4:50 AM
> To: Quan, Evan ; amd-gfx@lists.freedesktop.org
> Cc: Li, Candice ; Gui, Jack ; Alex
> Deucher 
> Subject: Re: [PATCH] drm/amd/powerplay: do proper cleanups on hw_fini
> 
> Hey guys,
> 
> 
> 
> This patch caused some kind of reversion with smu_reset on Navi10. I'm no
> expert since everything I know comes from just reading through the code, so
> this could be some kind of intended behavior, but after this patch, if you 
> write a
> pptable to the sysfs pp_table interface on navi10, then the SMU will fail to 
> reset
> successfully, and the result is seemingly an unrecoverable situation.
> 
> 
> 
> I put in a report on bugzilla with dmesg logs
> :
> https://bugs.freedesktop.org/show_bug.cgi?id=112234
> 
> 
> Finding this change was the result of a bisect to find where the issue 
> started,
> and reverting the changes to smu_hw_fini resolved the issue.
> Any advice on possible proper fixes?
> 
> Thanks in advance,
> 
> Matt
> 
> On 9/2/19 9:44 PM, Quan, Evan wrote:
> > These are needed for smu_reset support.
> >
> > Change-Id: If29ede4b99758adb08fd4e16665f44fd893ec99b
> > Signed-off-by: Evan Quan 
> > ---
> >  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 17
> +
> >  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  3 +++
> >  drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 10 ++
> >  3 files changed, 30 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > index d5ee13a78eb7..3cf8d944f890 100644
> > --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > @@ -1286,6 +1286,11 @@ static int smu_hw_init(void *handle)
> > return ret;
> >  }
> >
> > +static int smu_stop_dpms(struct smu_context *smu) {
> > +   return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures); }
> > +
> >  static int smu_hw_fini(void *handle)
> >  {
> > struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@
> > -1298,6 +1303,18 @@ static int smu_hw_fini(void *handle)
> > smu_powergate_vcn(&adev->smu, true);
> > }
> >
> > +   ret = smu_stop_thermal_control(smu);
> > +   if (ret) {
> > +   pr_warn("Fail to stop thermal control!\n");
> > +   return ret;
> > +   }
> > +
> > +   ret = smu_stop_dpms(smu);
> > +   if (ret) {
> > +   pr_warn("Fail to stop Dpms!\n");
> > +   return ret;
> > +   }
> > +
> > kfree(table_context->driver_pptable);
> > table_context->driver_pptable = NULL;
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > index b19224cb6d6d..8e4b0ad24712 100644
> > --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > @@ -498,6 +498,7 @@ struct smu_funcs
> > int (*get_current_clk_freq)(struct smu_context *smu, enum
> smu_clk_type clk_id, uint32_t *value);
> > int (*init_max_sustainable_clocks)(struct smu_context *smu);
> > int (*start_thermal_control)(struct smu_context *smu);
> > +   int (*stop_thermal_control)(struct smu_context *smu);
> > int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors
> sensor,
> >void *data, uint32_t *size);
> > int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t
> > clk); @@ -647,6 +648,8 @@ struct smu_funcs
> > ((smu)->ppt_funcs->set_thermal_fan_table ?
> > (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)  #define
> smu_start_thermal_control(smu) \
> > ((smu)->funcs->start_thermal_control?
> > (smu)->funcs->start_thermal_control((smu)) : 0)
> > +#define smu_stop_thermal_control(smu) \
> > +   ((smu)->funcs->stop_thermal_control?
> > +(smu)->funcs->stop_thermal_control((smu)) : 0)
> >  #define smu_read_sensor(smu, sensor, data, size) \
> > ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs-
> >read_sensor((smu),
> > (sensor), (data), (size)) : 0)  #define smu_smc_read_sensor(smu,
> > sensor, data, size) \ diff --git
> > a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> > b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> > index db5e94ce54af..1a38af84394e 100644
> > --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> > +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> > @@ -1209,6 +1209,15 @@ static int
> smu_v11_0_start_thermal_control(struct smu_context *smu)
> > return ret;
> >  }
> >
> > +static int smu_v11_0_stop_thermal_control(struct smu_context *smu) {
> > +   struct amdgpu_device *adev = smu->adev;
> > +
> > +   WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
> > +
> > +   return 0;
> > +}
> > +
> >  static uint16_t convert_to_vddc(uint8_t vid)  {
> > return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE); @@
> > -17

[PATCH] drm/amd/powerplay: avoid DPM reenable process on Navi1x ASICs

2019-11-11 Thread Evan Quan
Otherwise, without RLC reinitialization, the DPM reenablement
will fail. That affects the custom pptable uploading.

Change-Id: I6fe2ed5ce23f2a5b66f371c0b6d1f924837e5af6
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 32 +++
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  1 +
 2 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 76a4154b3be2..a4d67b30fd72 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1293,10 +1293,25 @@ static int smu_hw_fini(void *handle)
return ret;
}
 
-   ret = smu_stop_dpms(smu);
-   if (ret) {
-   pr_warn("Fail to stop Dpms!\n");
-   return ret;
+   /*
+* For custom pptable uploading, skip the DPM features
+* disable process on Navi1x ASICs.
+*   - As the gfx related features are under control of
+* RLC on those ASICs. RLC reinitialization will be
+* needed to reenable them. That will cost much more
+* efforts.
+*
+*   - SMU firmware can handle the DPM reenablement
+* properly.
+*/
+   if (!smu->uploading_custom_pp_table ||
+   !((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))) {
+   ret = smu_stop_dpms(smu);
+   if (ret) {
+   pr_warn("Fail to stop Dpms!\n");
+   return ret;
+   }
}
 
kfree(table_context->driver_pptable);
@@ -1324,13 +1339,16 @@ int smu_reset(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
 
+   smu->uploading_custom_pp_table = true;
+
ret = smu_hw_fini(adev);
if (ret)
-   return ret;
+   goto out;
 
ret = smu_hw_init(adev);
-   if (ret)
-   return ret;
+
+out:
+   smu->uploading_custom_pp_table = false;
 
return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 8120e7587585..215841f5fb93 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -391,6 +391,7 @@ struct smu_context
 
uint32_t smc_if_version;
 
+   bool uploading_custom_pp_table;
 };
 
 struct i2c_adapter;
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr

2019-11-11 Thread Deng, Emily
Hi Christian and Andrey,
 The issue I encountered is the bad job is freeing after entering to the 
amdgpu_device_gpu_recover. Don't know why, as per Christian said, it will call 
cancel_delayed_work in drm_sched_cleanup_jobs.

Best wishes
Emily Deng



>-Original Message-
>From: amd-gfx  On Behalf Of Deng,
>Emily
>Sent: Monday, November 11, 2019 3:19 PM
>To: Grodzovsky, Andrey ; Koenig, Christian
>; amd-gfx@lists.freedesktop.org
>Subject: RE: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr
>
>Hi Andrey,
>I don’t think your patch will help for this. As it will may call
>kthread_should_park in drm_sched_cleanup_jobs first, and then call
>kcl_kthread_park. And then it still has a race between the 2 threads.
>
>Best wishes
>Emily Deng
>
>
>
>>-Original Message-
>>From: Grodzovsky, Andrey 
>>Sent: Saturday, November 9, 2019 3:01 AM
>>To: Koenig, Christian ; Deng, Emily
>>; amd-gfx@lists.freedesktop.org
>>Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr
>>
>>
>>On 11/8/19 5:35 AM, Koenig, Christian wrote:
>>> Hi Emily,
>>>
>>> exactly that can't happen. See here:
>>>
      /* Don't destroy jobs while the timeout worker is running
 */
      if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
      !cancel_delayed_work(&sched->work_tdr))
      return NULL;
>>> We never free jobs while the timeout working is running to prevent
>>> exactly that issue.
>>
>>
>>I don't think this protects us if drm_sched_cleanup_jobs is called for
>>scheduler which didn't experience a timeout, in
>>amdgpu_device_gpu_recover we access
>>sched->ring_mirror_list for all the schedulers on a device so this
>>sched->condition
>>above won't protect us. What in fact could help maybe is my recent
>>patch
>>541c521 drm/sched: Avoid job cleanup if sched thread is parked. because
>>we do park each of the scheduler threads during tdr job before trying
>>to access
>>sched->ring_mirror_list.
>>
>>Emily - did you see this problem with that patch in place ? I only
>>pushed it yesterday.
>>
>>Andrey
>>
>>
>>>
>>> Regards,
>>> Christian.
>>>
>>> Am 08.11.19 um 11:32 schrieb Deng, Emily:
 Hi Christian,
The drm_sched_job_timedout-> amdgpu_job_timedout call
>>amdgpu_device_gpu_recover. I mean the main scheduler free the jobs
>>while in amdgpu_device_gpu_recover, and before calling drm_sched_stop.

 Best wishes
 Emily Deng



> -Original Message-
> From: Koenig, Christian 
> Sent: Friday, November 8, 2019 6:26 PM
> To: Deng, Emily ; amd-
>g...@lists.freedesktop.org
> Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for tdr
>
> Hi Emily,
>
> well who is calling amdgpu_device_gpu_recover() in this case?
>
> When it's not the scheduler we shouldn't have a guilty job in the first
>place.
>
> Regards,
> Christian.
>
> Am 08.11.19 um 11:22 schrieb Deng, Emily:
>> Hi Chrisitan,
>> No, I am with the new branch and also has the patch. Even
>> it are freed by
> main scheduler, how we could avoid main scheduler to free jobs
> while enter to function amdgpu_device_gpu_recover?
>> Best wishes
>> Emily Deng
>>
>>
>>
>>> -Original Message-
>>> From: Koenig, Christian 
>>> Sent: Friday, November 8, 2019 6:15 PM
>>> To: Deng, Emily ;
>>> amd-gfx@lists.freedesktop.org
>>> Subject: Re: [PATCH] drm/amdgpu: Fix the null pointer issue for
>>> tdr
>>>
>>> Hi Emily,
>>>
>>> in this case you are on an old code branch.
>>>
>>> Jobs are freed now by the main scheduler thread and only if no
>>> timeout handler is running.
>>>
>>> See this patch here:
 commit 5918045c4ed492fb5813f980dcf89a90fefd0a4e
 Author: Christian König 
 Date:   Thu Apr 18 11:00:21 2019 -0400

    drm/scheduler: rework job destruction
>>> Regards,
>>> Christian.
>>>
>>> Am 08.11.19 um 11:11 schrieb Deng, Emily:
 Hi Christian,
  Please refer to follow log, when it enter to
 amdgpu_device_gpu_recover
>>> function, the bad job 5086879e is freeing in function
>>> amdgpu_job_free_cb  at the same time, because of the hardware
>>> fence
> signal.
>>> But amdgpu_device_gpu_recover goes faster, at this case, the
>>> s_fence is already freed, but job is not freed in time. Then this
>>> issue
>>occurs.
 [  449.792189] [drm:amdgpu_job_timedout [amdgpu]] *ERROR* ring
> sdma0
 timeout, signaled seq=2481, emitted seq=2483 [  449.793202]
 [drm:amdgpu_job_timedout [amdgpu]] *ERROR* Process
>information:
>>> process  pid 0 thread  pid 0, s_job:5086879e [
>>> 449.794163] amdgpu
>>> :00:08.0: GPU reset begin!
 [  449.794175] Emily:amdgpu_job_free_cb,Process information:
 process pid 0 thread  pid 0, s_job:000

[PATCH -next] drm/amd/display: remove set but not used variable 'bpc'

2019-11-11 Thread YueHaibing
Fixes gcc '-Wunused-but-set-variable' warning:

drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c: In function 
get_pbn_from_timing:
drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c:2364:11: warning:
 variable bpc set but not used [-Wunused-but-set-variable]

It is not used since commit e49f69363adf ("drm/amd/display: use
proper formula to calculate bandwidth from timing")

Signed-off-by: YueHaibing 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index bdc8be3..53394e2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2653,13 +2653,11 @@ static int get_color_depth(enum dc_color_depth 
color_depth)
 
 static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
 {
-   uint32_t bpc;
uint64_t kbps;
struct fixed31_32 peak_kbps;
uint32_t numerator;
uint32_t denominator;
 
-   bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
 
/*
-- 
2.7.4


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH -next] drm/amd/display: remove set but not used variable 'ds_port'

2019-11-11 Thread YueHaibing
Fixes gcc '-Wunused-but-set-variable' warning:

drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c: In function 
dp_wa_power_up_0010FA:
drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c:2320:35: warning:
 variable ds_port set but not used [-Wunused-but-set-variable]

It is never used, so can be removed.

Signed-off-by: YueHaibing 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 
 1 file changed, 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 65de32f..b814b74 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2910,7 +2910,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, 
uint8_t *dpcd_data,
int length)
 {
int retry = 0;
-   union dp_downstream_port_present ds_port = { 0 };
 
if (!link->dpcd_caps.dpcd_rev.raw) {
do {
@@ -2923,9 +2922,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, 
uint8_t *dpcd_data,
} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
}
 
-   ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
-DP_DPCD_REV];
-
if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
switch (link->dpcd_caps.branch_dev_id) {
/* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power 
down
-- 
2.7.4


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 2/2] drm/amd/powerplay: remove set but not used variable 'data'

2019-11-11 Thread zhengbin
Fixes gcc '-Wunused-but-set-variable' warning:

drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c: In function 
vega10_get_performance_level:
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c:5217:23: warning: variable 
data set but not used [-Wunused-but-set-variable]

'data' is introduced by commit f688b614b643 ("drm/amd/pp:
Implement get_performance_level for legacy dgpu"), but never used,
so remove it.

Reported-by: Hulk Robot 
Signed-off-by: zhengbin 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index a4a7f85..776e632 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -5252,13 +5252,11 @@ static int vega10_get_performance_level(struct pp_hwmgr 
*hwmgr, const struct pp_
PHM_PerformanceLevel *level)
 {
const struct vega10_power_state *ps;
-   struct vega10_hwmgr *data;
uint32_t i;

if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;

-   data = hwmgr->backend;
ps = cast_const_phw_vega10_power_state(state);

i = index > ps->performance_level_count - 1 ?
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH][next] drm/amd/display: fix spelling mistake "exeuction" -> "execution"

2019-11-11 Thread Colin King
From: Colin Ian King 

There are spelling mistakes in a DC_ERROR message and a comment.
Fix these.

Signed-off-by: Colin Ian King 
---
 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c| 2 +-
 drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c 
b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 61cefe0a3790..b65b66025267 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -92,7 +92,7 @@ void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
 
status = dmub_srv_cmd_execute(dmub);
if (status != DMUB_STATUS_OK)
-   DC_ERROR("Error starting DMUB exeuction: status=%d\n", status);
+   DC_ERROR("Error starting DMUB execution: status=%d\n", status);
 }
 
 void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index aa8f0396616d..45e427d1952e 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -416,7 +416,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
  * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
  * @dmub: the dmub service
  *
- * Begins exeuction of queued commands on the dmub.
+ * Begins execution of queued commands on the dmub.
  *
  * Return:
  *   DMUB_STATUS_OK - success
-- 
2.20.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/amd/powerplay: remove set but not used variables 'threshold' and 'state'

2019-11-11 Thread zhengbin
Fixes gcc '-Wunused-but-set-variable' warning:

drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c: In function 
fiji_populate_single_graphic_level:
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c:943:11: warning: variable 
threshold set but not used [-Wunused-but-set-variable]
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c: In function 
fiji_populate_memory_timing_parameters:
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c:1504:8: warning: variable 
state set but not used [-Wunused-but-set-variable]

They are introduced by commit 2e112b4ae3ba ("drm/amd/pp:
remove fiji_smc/smumgr split."), but never used,
so remove them.

Reported-by: Hulk Robot 
Signed-off-by: zhengbin 
---
 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c 
b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index da025b1..32ebb38 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -940,7 +940,7 @@ static int fiji_populate_single_graphic_level(struct 
pp_hwmgr *hwmgr,
 {
int result;
/* PP_Clocks minClocks; */
-   uint32_t threshold, mvdd;
+   uint32_t mvdd;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -973,8 +973,6 @@ static int fiji_populate_single_graphic_level(struct 
pp_hwmgr *hwmgr,
level->VoltageDownHyst = 0;
level->PowerThrottle = 0;

-   threshold = clock * data->fast_watermark_threshold / 100;
-
data->display_timing.min_clock_in_sr = 
hwmgr->display_config->min_core_set_clock_in_sr;

if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 
PHM_PlatformCaps_SclkDeepSleep))
@@ -1501,7 +1499,7 @@ static int fiji_populate_memory_timing_parameters(struct 
pp_hwmgr *hwmgr,
uint32_t dram_timing;
uint32_t dram_timing2;
uint32_t burstTime;
-   ULONG state, trrds, trrdl;
+   ULONG trrds, trrdl;
int result;

result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
@@ -1513,7 +1511,6 @@ static int fiji_populate_memory_timing_parameters(struct 
pp_hwmgr *hwmgr,
dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);

-   state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);

--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 0/2] remove some set but not used variables in hwmgr

2019-11-11 Thread zhengbin
zhengbin (2):
  drm/amd/powerplay: remove set but not used variables
'vbios_version' and 'data'
  drm/amd/powerplay: remove set but not used variable 'data'

 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   | 4 
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 --
 2 files changed, 6 deletions(-)

--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 1/2] drm/amd/powerplay: remove set but not used variables 'vbios_version' and 'data'

2019-11-11 Thread zhengbin
Fixes gcc '-Wunused-but-set-variable' warning:

drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c: In function 
smu7_check_mc_firmware:
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c:4215:11: warning: variable 
vbios_version set but not used [-Wunused-but-set-variable]
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c: In function 
smu7_get_performance_level:
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c:5054:21: warning: variable 
data set but not used [-Wunused-but-set-variable]

'vbios_version' is introduced by commit 599a7e9fe1b6 ("drm/amd/powerplay:
implement smu7 hwmgr to manager asics with smu ip version 7."),
but never used, so remove it.

'data' is introduced by commit f688b614b643 ("drm/amd/pp:
Implement get_performance_level for legacy dgpu"), but never used,
so remove it.

Reported-by: Hulk Robot 
Signed-off-by: zhengbin 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4 
 1 file changed, 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index c805c6f..775366a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4218,7 +4218,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
 {
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

-   uint32_t vbios_version;
uint32_t tmp;

/* Read MC indirect register offset 0x9F bits [3:0] to see
@@ -4227,7 +4226,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
 */

smu7_get_mc_microcode_version(hwmgr);
-   vbios_version = hwmgr->microcode_version_info.MC & 0xf;

data->need_long_memory_training = false;

@@ -5057,13 +5055,11 @@ static int smu7_get_performance_level(struct pp_hwmgr 
*hwmgr, const struct pp_hw
PHM_PerformanceLevel *level)
 {
const struct smu7_power_state *ps;
-   struct smu7_hwmgr *data;
uint32_t i;

if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;

-   data = hwmgr->backend;
ps = cast_const_phw_smu7_power_state(state);

i = index > ps->performance_level_count - 1 ?
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

  1   2   >