[PATCH] drm/amdgpu: refine create and release logic of hive info

2020-08-17 Thread Dennis Li
Change to dynamically create and release the hive info object,
which helps the driver support more hives in the future.

Signed-off-by: Dennis Li 

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8a55b0bc044a..fdfdc2f678c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2840,7 +2840,7 @@ static void amdgpu_device_xgmi_reset_func(struct 
work_struct *__work)
 {
struct amdgpu_device *adev =
container_of(__work, struct amdgpu_device, xgmi_reset_work);
-   struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+   struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
 
/* It's a bug to not have a hive within this function */
if (WARN_ON(!hive))
@@ -2878,6 +2878,7 @@ static void amdgpu_device_xgmi_reset_func(struct 
work_struct *__work)
if (adev->asic_reset_res)
DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
 adev->asic_reset_res, adev->ddev->unique);
+   amdgpu_put_xgmi_hive(hive);
 }
 
 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
@@ -4286,11 +4287,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device 
*adev,
 * We always reset all schedulers for device and all devices for XGMI
 * hive so that should take care of them too.
 */
-   hive = amdgpu_get_xgmi_hive(adev, false);
+   hive = amdgpu_get_xgmi_hive(adev);
if (hive) {
if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as 
another already in progress",
job ? job->base.id : -1, hive->hive_id);
+   amdgpu_put_xgmi_hive(hive);
return 0;
}
mutex_lock(&hive->hive_lock);
@@ -4456,6 +4458,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (hive) {
atomic_set(&hive->in_reset, 0);
mutex_unlock(&hive->hive_lock);
+   amdgpu_put_xgmi_hive(hive);
}
 
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 5680f7eafcb1..e18606e322e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1514,7 +1514,7 @@ static void amdgpu_ras_do_recovery(struct work_struct 
*work)
struct amdgpu_device *remote_adev = NULL;
struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle =  NULL;
-   struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
+   struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
 
/* Build list of devices to query RAS related errors */
if  (hive && adev->gmc.xgmi.num_physical_nodes > 1)
@@ -1525,6 +1525,8 @@ static void amdgpu_ras_do_recovery(struct work_struct 
*work)
device_list_handle = &device_list;
}
 
+   amdgpu_put_xgmi_hive(hive);
+
list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) {
amdgpu_ras_log_on_err_counter(remote_adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 67a756f4337b..5315d16539f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -35,11 +35,9 @@
 
 static DEFINE_MUTEX(xgmi_mutex);
 
-#define AMDGPU_MAX_XGMI_HIVE   8
 #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE4
 
-static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
-static unsigned hive_count = 0;
+static LIST_HEAD(xgmi_hive_list);
 
 static const int xgmi_pcs_err_status_reg_vg20[] = {
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
@@ -171,59 +169,47 @@ static const struct amdgpu_pcs_ras_field 
wafl_pcs_ras_fields[] = {
  *
  */
 
+static struct attribute amdgpu_xgmi_hive_id = {
+   .name = "xgmi_hive_id",
+   .mode = S_IRUGO
+};
 
-static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
-   struct device_attribute *attr, char *buf)
-{
-   struct amdgpu_hive_info *hive =
-   container_of(attr, struct amdgpu_hive_info, dev_attr);
-
-   return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
-}
+static struct attribute *amdgpu_xgmi_hive_attrs[] = {
+   &amdgpu_xgmi_hive_id,
+   NULL
+};
 
-static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
-   struct amdgpu_hive_info *hive)
+static ssize_t amdgpu_xgmi_show_hive_id(struct kobject *kobj,
+   struct attribute *attr, char *buf)
 {
-   int ret = 0;
-
-   if (WARN_ON(hive->kobj))
-   return -EINVAL;
-
-   hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
-   if (!hive->kobj) {
-   dev_err(adev->dev, "XGMI: 

RE: [PATCH 4/4] drm/amdgpu/pm: only hide average power on SI and pre-RENOIR APUs

2020-08-17 Thread Quan, Evan
[AMD Official Use Only - Internal Distribution Only]

Series is reviewed-by: Evan Quan 

-Original Message-
From: amd-gfx  On Behalf Of Alex Deucher
Sent: Tuesday, August 18, 2020 3:53 AM
To: amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander 
Subject: [PATCH 4/4] drm/amdgpu/pm: only hide average power on SI and 
pre-RENOIR APUs

We can get this on RENOIR and newer via the SMU metrics table.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/pm/amdgpu_pm.c | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f2e70655e8d9..a77f7347fdfc 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3312,12 +3312,17 @@ static umode_t hwmon_attributes_visible(struct kobject 
*kobj,

 if (((adev->flags & AMD_IS_APU) ||
  adev->family == AMDGPU_FAMILY_SI) &&/* not implemented yet */
-(attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
  attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
  attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
 return 0;

+if (((adev->family == AMDGPU_FAMILY_SI) ||
+ ((adev->flags & AMD_IS_APU) &&
+  (adev->asic_type < CHIP_RENOIR))) &&/* not implemented yet */
+(attr == &sensor_dev_attr_power1_average.dev_attr.attr))
+return 0;
+
 if (!is_support_sw_smu(adev)) {
 /* hide max/min values if we can't both query and manage the fan */
 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
--
2.25.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amd/pm: update driver if version for navy_flounder

2020-08-17 Thread Zhou1, Tao
[AMD Public Use]

Reviewed-by: Tao Zhou 

> -Original Message-
> From: Jiansong Chen 
> Sent: Tuesday, August 18, 2020 11:01 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Feng, Kenneth ; Zhou1, Tao
> ; Chen, Jiansong (Simon) 
> Subject: [PATCH] drm/amd/pm: update driver if version for navy_flounder
> 
> It's in accordance with pmfw 65.7.0 for navy_flounder.
> 
> Signed-off-by: Jiansong Chen 
> Change-Id: Iaac4c591f92c9a00891a29757d142c0109dcd676
> ---
>  drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> index 65363d56e3cc..77d0996f4ec2 100644
> --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> @@ -31,7 +31,7 @@
>  #define SMU11_DRIVER_IF_VERSION_NV12 0x36  #define
> SMU11_DRIVER_IF_VERSION_NV14 0x36  #define
> SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x35 -#define
> SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x3
> +#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x4
> 
>  /* MP Apertures */
>  #define MP0_Public   0x0380
> --
> 2.25.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/pm: update driver if version for navy_flounder

2020-08-17 Thread Jiansong Chen
It's in accordance with pmfw 65.7.0 for navy_flounder.

Signed-off-by: Jiansong Chen 
Change-Id: Iaac4c591f92c9a00891a29757d142c0109dcd676
---
 drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 65363d56e3cc..77d0996f4ec2 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -31,7 +31,7 @@
 #define SMU11_DRIVER_IF_VERSION_NV12 0x36
 #define SMU11_DRIVER_IF_VERSION_NV14 0x36
 #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x35
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x3
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x4
 
 /* MP Apertures */
 #define MP0_Public 0x0380
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: Fix repeatly flr issue

2020-08-17 Thread Deng, Emily
[AMD Official Use Only - Internal Distribution Only]

>-Original Message-
>From: Das, Nirmoy 
>Sent: Wednesday, August 12, 2020 8:18 PM
>To: Deng, Emily ; amd-gfx@lists.freedesktop.org
>Subject: Re: [PATCH] drm/amdgpu: Fix repeatly flr issue
>
>
>On 8/12/20 11:19 AM, Emily.Deng wrote:
>> From: jqdeng 
>>
>> Only for no job running test case need to do recover in flr
>> notification.
>> For having job in mirror list, then let guest driver to hit job
>> timeout, and then do recover.
>>
>> Signed-off-by: jqdeng 
>> Change-Id: Ic6234fce46fa1655ba81c4149235eeac75e75868
>> ---
>>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 20 +++-
>>   drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 22 -
>-
>>   2 files changed, 39 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> index fe31cbeccfe9..12fe5164aaf3 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> @@ -238,6 +238,9 @@ static void xgpu_ai_mailbox_flr_work(struct
>work_struct *work)
>>   struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt,
>flr_work);
>>   struct amdgpu_device *adev = container_of(virt, struct
>amdgpu_device, virt);
>>   int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
>> +int i;
>> +bool need_do_recover = true;
>
>
>We should find a better name for "need_do_recover", may be
>"need_to_recover" ?
Thanks, will modify later.
>
>
>> +struct drm_sched_job *job;
>>
>>   /* block amdgpu_gpu_recover till msg FLR COMPLETE received,
>>* otherwise the mailbox msg will be ruined/reseted by
>> @@ -258,10 +261,25 @@ static void xgpu_ai_mailbox_flr_work(struct
>work_struct *work)
>>
>>   flr_done:
>>   up_read(&adev->reset_sem);
>> +for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>> +struct amdgpu_ring *ring = adev->rings[i];
>> +
>> +if (!ring || !ring->sched.thread)
>> +continue;
>> +
>> +spin_lock(&ring->sched.job_list_lock);
>> +job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
>> +struct drm_sched_job, node);
>> +spin_unlock(&ring->sched.job_list_lock);
>> +if (job) {
>> +need_do_recover = false;
>> +break;
>> +}
>> +}
>
>
>This 1st job retrieval logic can move to a function as there are two
>instance of it.
>Sorry, I didn't get your point.
>
>>
>>   /* Trigger recovery for world switch failure if no TDR */
>>   if (amdgpu_device_should_recover_gpu(adev)
>> -&& adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)
>> +&& (need_do_recover || adev->sdma_timeout ==
>MAX_SCHEDULE_TIMEOUT))
>>   amdgpu_device_gpu_recover(adev, NULL);
>>   }
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
>b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
>> index 6f55172e8337..fc92c494df0b 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
>> @@ -259,6 +259,9 @@ static void xgpu_nv_mailbox_flr_work(struct
>work_struct *work)
>>   struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt,
>flr_work);
>>   struct amdgpu_device *adev = container_of(virt, struct
>amdgpu_device, virt);
>>   int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
>> +int i;
>> +bool need_do_recover = true;
>> +struct drm_sched_job *job;
>>
>>   /* block amdgpu_gpu_recover till msg FLR COMPLETE received,
>>* otherwise the mailbox msg will be ruined/reseted by
>> @@ -279,10 +282,25 @@ static void xgpu_nv_mailbox_flr_work(struct
>work_struct *work)
>>
>>   flr_done:
>>   up_read(&adev->reset_sem);
>> +for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>> +struct amdgpu_ring *ring = adev->rings[i];
>> +
>> +if (!ring || !ring->sched.thread)
>> +continue;
>> +
>> +spin_lock(&ring->sched.job_list_lock);
>> +job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
>> +struct drm_sched_job, node);
>> +spin_unlock(&ring->sched.job_list_lock);
>> +if (job) {
>> +need_do_recover = false;
>> +break;
>> +}
>> +}
>>
>>   /* Trigger recovery for world switch failure if no TDR */
>> -if (amdgpu_device_should_recover_gpu(adev)
>> -&& (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
>> +if (amdgpu_device_should_recover_gpu(adev) && (need_do_recover
>||
>> +adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
>>   adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
>>   adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
>>   adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] Revert "drm/amdgpu: disable gfxoff for navy_flounder"

2020-08-17 Thread Feng, Kenneth
[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Kenneth Feng 


-Original Message-
From: Jiansong Chen  
Sent: Monday, August 17, 2020 10:46 PM
To: amd-gfx@lists.freedesktop.org
Cc: Zhou1, Tao ; Feng, Kenneth ; Chen, 
Jiansong (Simon) 
Subject: [PATCH] Revert "drm/amdgpu: disable gfxoff for navy_flounder"

This reverts commit 6a72ad7e387c6fec821c230fda3460f79fc0f877.
Newly released sdma fw (51.52) provides a fix for the issue.
---
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index e87d43537013..e527be22a3d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3610,9 +3610,6 @@ static void gfx_v10_0_check_gfxoff_flag(struct 
amdgpu_device *adev)
if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
break;
-   case CHIP_NAVY_FLOUNDER:
-   adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-   break;
default:
break;
}
-- 
2.25.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] Revert "drm/amdgpu: disable gfxoff for navy_flounder"

2020-08-17 Thread Zhou1, Tao
[AMD Public Use]

Reviewed-by: Tao Zhou 

> -Original Message-
> From: Jiansong Chen 
> Sent: Monday, August 17, 2020 10:46 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Zhou1, Tao ; Feng, Kenneth
> ; Chen, Jiansong (Simon) 
> Subject: [PATCH] Revert "drm/amdgpu: disable gfxoff for navy_flounder"
> 
> This reverts commit 6a72ad7e387c6fec821c230fda3460f79fc0f877.
> Newly released sdma fw (51.52) provides a fix for the issue.
> ---
>  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 3 ---
>  1 file changed, 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> index e87d43537013..e527be22a3d5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> @@ -3610,9 +3610,6 @@ static void gfx_v10_0_check_gfxoff_flag(struct
> amdgpu_device *adev)
>   if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
>   adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
>   break;
> - case CHIP_NAVY_FLOUNDER:
> - adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
> - break;
>   default:
>   break;
>   }
> --
> 2.25.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2] drm/amdkfd: sparse: Fix warning in reading SDMA counters

2020-08-17 Thread Felix Kuehling
Sorry, more bike-shedding.

Am 2020-08-17 um 7:58 p.m. schrieb Mukul Joshi:
> Add __user annotation to fix related sparse warning while reading
> SDMA counters from userland.
>
> Reported-by: kernel test robot 
> Signed-off-by: Mukul Joshi 
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 9 +++--
>  1 file changed, 3 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index e0e60b0d0669..e2894967c372 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -157,19 +157,16 @@ int read_sdma_queue_counter(uint64_t q_rptr, uint64_t 
> *val)
>  {
>   int ret;
>   uint64_t tmp = 0;
> + uint64_t __user *sdma_usage_cntr;
>  
>   if (!val)
>   return -EINVAL;

Maybe this check isn't needed. Both callers pass in pointers to local
variables. If a caller gets that wrong, how likely are they going to
handle the error code correctly?


>   /*
>* SDMA activity counter is stored at queue's RPTR + 0x8 location.
>*/
> - if (!access_ok((const void __user *)(q_rptr +
> - sizeof(uint64_t)), sizeof(uint64_t))) {
> - pr_err("Can't access sdma queue activity counter\n");
> - return -EFAULT;
> - }
> + sdma_usage_cntr = (uint64_t __user *)q_rptr + 1;
>  
> - ret = get_user(tmp, (uint64_t *)(q_rptr + sizeof(uint64_t)));
> + ret = get_user(tmp, sdma_usage_cntr);

Maybe you don't need sdma_usage_cntr. Just inline the pointer
arithmetic. And I'm not sure why you need tmp either. Is it in case the
read gets only one dword and fails on the second one? The callers will
ignore the value, if you return an error, so I don't think it matters.
So this whole function would become very simple:

return get_user(*val, (uint64_t __user *)q_rptr + 1);

Now it could probably be a static inline function in a
kfd_device_queue_manager.h.

If that is the only use of q_rptr in the function, why not make the
parameter type uint64_t __user *, so you don't even need the type cast
in here? You can also change the type in struct temp_sdma_queue_list to
match.

While you're at it, you could also change the types of read_ptr and
write_ptr in struct queue_properties, because uint32_t * is really not
the correct type. It's a pointer to the wrong size and the wrong address
space. Though that change may have a few more ripple effects.

Regards,
  Felix


>   if (!ret) {
>   *val = tmp;
>   }
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH v2] drm/amdkfd: sparse: Fix warning in reading SDMA counters

2020-08-17 Thread Mukul Joshi
Add __user annotation to fix related sparse warning while reading
SDMA counters from userland.

Reported-by: kernel test robot 
Signed-off-by: Mukul Joshi 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 9 +++--
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e0e60b0d0669..e2894967c372 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -157,19 +157,16 @@ int read_sdma_queue_counter(uint64_t q_rptr, uint64_t 
*val)
 {
int ret;
uint64_t tmp = 0;
+   uint64_t __user *sdma_usage_cntr;
 
if (!val)
return -EINVAL;
/*
 * SDMA activity counter is stored at queue's RPTR + 0x8 location.
 */
-   if (!access_ok((const void __user *)(q_rptr +
-   sizeof(uint64_t)), sizeof(uint64_t))) {
-   pr_err("Can't access sdma queue activity counter\n");
-   return -EFAULT;
-   }
+   sdma_usage_cntr = (uint64_t __user *)q_rptr + 1;
 
-   ret = get_user(tmp, (uint64_t *)(q_rptr + sizeof(uint64_t)));
+   ret = get_user(tmp, sdma_usage_cntr);
if (!ret) {
*val = tmp;
}
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdkfd: sparse: Fix warning in reading SDMA counters

2020-08-17 Thread Felix Kuehling

Am 2020-08-17 um 4:45 p.m. schrieb Mukul Joshi:
> Add __user annotation to fix related sparse warning while reading
> SDMA counters from userland.
>
> Reported-by: kernel test robot 
> Signed-off-by: Mukul Joshi 
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 8 +---
>  1 file changed, 5 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index e0e60b0d0669..a6a4bbf99d9b 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -157,19 +157,21 @@ int read_sdma_queue_counter(uint64_t q_rptr, uint64_t 
> *val)
>  {
>   int ret;
>   uint64_t tmp = 0;
> + uint64_t __user *sdma_rptr;

This name is misleading. You never point this to the actual rptr. Call
this sdma_usage_cntr or something similar.


>  
>   if (!val)
>   return -EINVAL;
>   /*
>* SDMA activity counter is stored at queue's RPTR + 0x8 location.
>*/
> - if (!access_ok((const void __user *)(q_rptr +
> - sizeof(uint64_t)), sizeof(uint64_t))) {
> + sdma_rptr = (uint64_t *)(q_rptr + sizeof(uint64_t));

Should this cast to (uint64_t __user *)? A more elegant way to get the
offset would be:

    sdma_usage_cntr = (uint64_t __user *)q_rptr + 1;


> +
> + if (!access_ok((const void __user *)sdma_rptr, sizeof(uint64_t))) {

Is the explicit cast really needed here? And as far as I can tell
get_user already checks access_ok. So this check is probably redundant.

Regards,
  Felix


>   pr_err("Can't access sdma queue activity counter\n");
>   return -EFAULT;
>   }
>  
> - ret = get_user(tmp, (uint64_t *)(q_rptr + sizeof(uint64_t)));
> + ret = get_user(tmp, sdma_rptr);
>   if (!ret) {
>   *val = tmp;
>   }
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdkfd: sparse: Fix warning in reading SDMA counters

2020-08-17 Thread Mukul Joshi
Add __user annotation to fix related sparse warning while reading
SDMA counters from userland.

Reported-by: kernel test robot 
Signed-off-by: Mukul Joshi 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e0e60b0d0669..a6a4bbf99d9b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -157,19 +157,21 @@ int read_sdma_queue_counter(uint64_t q_rptr, uint64_t 
*val)
 {
int ret;
uint64_t tmp = 0;
+   uint64_t __user *sdma_rptr;
 
if (!val)
return -EINVAL;
/*
 * SDMA activity counter is stored at queue's RPTR + 0x8 location.
 */
-   if (!access_ok((const void __user *)(q_rptr +
-   sizeof(uint64_t)), sizeof(uint64_t))) {
+   sdma_rptr = (uint64_t *)(q_rptr + sizeof(uint64_t));
+
+   if (!access_ok((const void __user *)sdma_rptr, sizeof(uint64_t))) {
pr_err("Can't access sdma queue activity counter\n");
return -EFAULT;
}
 
-   ret = get_user(tmp, (uint64_t *)(q_rptr + sizeof(uint64_t)));
+   ret = get_user(tmp, sdma_rptr);
if (!ret) {
*val = tmp;
}
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/2] drm/scheduler: Scheduler priority fixes (v2)

2020-08-17 Thread Luben Tuikov
On 2020-08-17 9:53 a.m., Christian König wrote:
> Am 15.08.20 um 04:48 schrieb Luben Tuikov:
>> Remove DRM_SCHED_PRIORITY_LOW, as it was used
>> in only one place.
>>
>> Rename and separate by a line
>> DRM_SCHED_PRIORITY_MAX to DRM_SCHED_PRIORITY_COUNT
>> as it represents a (total) count of said
>> priorities and it is used as such in loops
>> throughout the code. (0-based indexing is
>> the count number.)
>>
>> Remove redundant word HIGH in priority names,
>> and rename *KERNEL* to *HIGH*, as it really
>> means that, high.
>>
>> v2: Add back KERNEL and remove SW and HW,
>>  in lieu of a single HIGH between NORMAL and KERNEL.
>>
>> Signed-off-by: Luben Tuikov 
> 
> I can't really judge the difference between MAX and COUNT, but the we 
> rename the values and get rid of the invalid one sounds like a good idea 
> to me.

Thanks Christian.

As to "max" vs. "count", I alluded to the difference
in the patch cover letter text:

> For instance, renaming MAX to COUNT, as usually a maximum value
> is a value which is part of the set of values, (e.g. a maxima of
> a function), and thus assignable, whereby a count is the size of
> a set (the enumeration in this case). It also makes it clearer
> when used to define size of arrays.

A "maximum" value is value which *can be attained.* For instance,
some would say, "The maximum temperature we expect today is 35 degC."
While a "count" is just the (usually integer) number of objects in a set.
The set could be composed of various objects, not necessarily integers.
It is possible that the maximum number in a set of integers to also
be the size of that set, e.g. A = { 1, 2, 3 }, max(A) = 3, sizeof(A) = 3,
but as you can see this is a special case; consider A = { Red, Green, Blue },
or A = { 2, 3, 5 }, or A = { 3 }.

To me it is confusing to read "MAX", as this is usually used
as a "watermark", say in temperature of a unit or something like that,
which we monitor and perform certain actions depending on whether
the maximum temperature is/has been attained. Usually, there'd
be one above it, called "CRITICAL".

And I've seen bugs where people would assume that MAX is an attainable
value, e.g. MAX_PRIORITY, "This is the maximum priority a task could
run at."

I'll add your RB to the patch! Thanks for your review.

Regards,
Luben

> 
> Reviewed-by: Christian König  for the series.
> 
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   |  4 ++--
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c |  6 +++---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  2 +-
>>   drivers/gpu/drm/scheduler/sched_main.c|  4 ++--
>>   include/drm/gpu_scheduler.h   | 12 +++-
>>   8 files changed, 18 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> index d85d13f7a043..68eaa4f687a6 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> @@ -46,7 +46,7 @@ const unsigned int 
>> amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
>>   static int amdgpu_ctx_priority_permit(struct drm_file *filp,
>>enum drm_sched_priority priority)
>>   {
>> -if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
>> +if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
>>  return -EINVAL;
>>   
>>  /* NORMAL and below are accessible by everyone */
>> @@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file 
>> *filp,
>>   static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum 
>> drm_sched_priority prio)
>>   {
>>  switch (prio) {
>> -case DRM_SCHED_PRIORITY_HIGH_HW:
>> +case DRM_SCHED_PRIORITY_HIGH:
>>  case DRM_SCHED_PRIORITY_KERNEL:
>>  return AMDGPU_GFX_PIPE_PRIO_HIGH;
>>  default:
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> index 75d37dfb51aa..bb9e5481ff3c 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> @@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct 
>> drm_gpu_scheduler *sched)
>>  int i;
>>   
>>  /* Signal all jobs not yet scheduled */
>> -for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
>> +for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; 
>> i--) {
>>  struct drm_sched_rq *rq = &sched->sched_rq[i];
>>   
>>  if (!rq)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
>> index 13ea8ebc421c..6d4fc79bf84a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
>> @@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct 
>> 

[PATCH 1/4] drm/amdgpu/pm: remove duplicate check

2020-08-17 Thread Alex Deucher
FAMILY_KV is APUs and we already check for APUs.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/pm/amdgpu_pm.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5fc6a9a13096..f2e70655e8d9 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3311,8 +3311,7 @@ static umode_t hwmon_attributes_visible(struct kobject 
*kobj,
}
 
if (((adev->flags & AMD_IS_APU) ||
-adev->family == AMDGPU_FAMILY_SI ||/* not implemented yet 
*/
-adev->family == AMDGPU_FAMILY_KV) &&   /* not implemented yet 
*/
+adev->family == AMDGPU_FAMILY_SI) &&   /* not implemented yet 
*/
(attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
-- 
2.25.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 3/4] drm/amdgpu/swsmu: implement power metrics for RENOIR

2020-08-17 Thread Alex Deucher
Grab the data from the SMU metrics table.

Signed-off-by: Alex Deucher 
---

Can someone with a renoir system verify this?

 .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c   | 21 +++
 1 file changed, 21 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index ac81f2f605a2..3b9ac72c7571 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -637,6 +637,23 @@ static int renoir_get_vddc(struct smu_context *smu, 
uint32_t *value,
return 0;
 }
 
+static int renoir_get_power(struct smu_context *smu, uint32_t *value)
+{
+   int ret = 0;
+   SmuMetrics_t metrics;
+
+   if (!value)
+   return -EINVAL;
+
   ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+   if (ret)
+   return ret;
+
+   *value = metrics.CurrentSocketPower << 8;
+
+   return 0;
+}
+
 /**
  * This interface get dpm clock table for dc
  */
@@ -981,6 +998,10 @@ static int renoir_read_sensor(struct smu_context *smu,
ret = renoir_get_vddc(smu, (uint32_t *)data, 1);
*size = 4;
break;
+   case AMDGPU_PP_SENSOR_GPU_POWER:
+   ret = renoir_get_power(smu, (uint32_t *)data);
+   *size = 4;
+   break;
default:
ret = -EOPNOTSUPP;
break;
-- 
2.25.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4/4] drm/amdgpu/pm: only hide average power on SI and pre-RENOIR APUs

2020-08-17 Thread Alex Deucher
We can get this on RENOIR and newer via the SMU metrics
table.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/pm/amdgpu_pm.c | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f2e70655e8d9..a77f7347fdfc 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3312,12 +3312,17 @@ static umode_t hwmon_attributes_visible(struct kobject 
*kobj,
 
if (((adev->flags & AMD_IS_APU) ||
 adev->family == AMDGPU_FAMILY_SI) &&   /* not implemented yet 
*/
-   (attr == _dev_attr_power1_average.dev_attr.attr ||
-attr == _dev_attr_power1_cap_max.dev_attr.attr ||
+   (attr == _dev_attr_power1_cap_max.dev_attr.attr ||
 attr == _dev_attr_power1_cap_min.dev_attr.attr||
 attr == _dev_attr_power1_cap.dev_attr.attr))
return 0;
 
+   if (((adev->family == AMDGPU_FAMILY_SI) ||
+((adev->flags & AMD_IS_APU) &&
+ (adev->asic_type < CHIP_RENOIR))) &&  /* not implemented yet 
*/
+   (attr == _dev_attr_power1_average.dev_attr.attr))
+   return 0;
+
if (!is_support_sw_smu(adev)) {
/* hide max/min values if we can't both query and manage the 
fan */
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
-- 
2.25.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/4] drm/amdgpu/swsmu: implement voltage metrics for RENOIR

2020-08-17 Thread Alex Deucher
Grab the data from the SMU metrics table.

Signed-off-by: Alex Deucher 
---
 .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c   | 29 +++
 1 file changed, 29 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 186929c31e9e..ac81f2f605a2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -616,6 +616,27 @@ static int renoir_get_current_activity_percent(struct 
smu_context *smu,
return 0;
 }
 
+static int renoir_get_vddc(struct smu_context *smu, uint32_t *value,
+  unsigned int index)
+{
+   int ret = 0;
+   SmuMetrics_t metrics;
+
+   if (index >= 2)
+   return -EINVAL;
+
+   if (!value)
+   return -EINVAL;
+
+   ret = smu_cmn_get_metrics_table(smu, , false);
+   if (ret)
+   return ret;
+
+   *value = metrics.Voltage[index];
+
+   return 0;
+}
+
 /**
  * This interface get dpm clock table for dc
  */
@@ -952,6 +973,14 @@ static int renoir_read_sensor(struct smu_context *smu,
*(uint32_t *)data *= 100;
*size = 4;
break;
+   case AMDGPU_PP_SENSOR_VDDGFX:
+   ret = renoir_get_vddc(smu, (uint32_t *)data, 0);
+   *size = 4;
+   break;
+   case AMDGPU_PP_SENSOR_VDDNB:
+   ret = renoir_get_vddc(smu, (uint32_t *)data, 1);
+   *size = 4;
+   break;
default:
ret = -EOPNOTSUPP;
break;
-- 
2.25.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/display: Add dsc_to_stream_resource for dcn3

2020-08-17 Thread Alex Deucher
On Mon, Aug 17, 2020 at 3:33 PM Bhawanpreet Lakha
 wrote:
>
> Without this, enabling dsc will cause a nullptr
>
> Reviewed-by: Mikita Lipski 
> Signed-off-by: Bhawanpreet Lakha 

Acked-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 1 +
>  1 file changed, 1 insertion(+)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c 
> b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
> index 1ee9087eec76..957fc37b971e 100644
> --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
> +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
> @@ -2424,6 +2424,7 @@ static const struct resource_funcs dcn30_res_pool_funcs 
> = {
> .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
> .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
> .add_stream_to_ctx = dcn30_add_stream_to_ctx,
> +   .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
> .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
> .populate_dml_writeback_from_context = 
> dcn30_populate_dml_writeback_from_context,
> .set_mcif_arb_params = dcn30_set_mcif_arb_params,
> --
> 2.17.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/display: Add dsc_to_stream_resource for dcn3

2020-08-17 Thread Bhawanpreet Lakha
Without this, enabling dsc will cause a nullptr

Reviewed-by: Mikita Lipski 
Signed-off-by: Bhawanpreet Lakha 
---
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 1ee9087eec76..957fc37b971e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -2424,6 +2424,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = 
{
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
+   .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = 
dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2] drm/amd/display: use correct scale for actual_brightness

2020-08-17 Thread Alex Deucher
On Mon, Aug 17, 2020 at 3:09 AM Alexander Monakov  wrote:
>
> Ping.

Patch looks good to me:
Reviewed-by: Alex Deucher 

Nick, unless you have any objections, I'll go ahead and apply it.

Alex

>
> On Tue, 4 Aug 2020, Alexander Monakov wrote:
>
> > Documentation for sysfs backlight level interface requires that
> > values in both 'brightness' and 'actual_brightness' files are
> > interpreted to be in range from 0 to the value given in the
> > 'max_brightness' file.
> >
> > With amdgpu, max_brightness gives 255, and values written by the user
> > into 'brightness' are internally rescaled to a wider range. However,
> > reading from 'actual_brightness' gives the raw register value without
> > inverse rescaling. This causes issues for various userspace tools such
> > as PowerTop and systemd that expect the value to be in the correct
> > range.
> >
> > Introduce a helper to retrieve internal backlight range. Use it to
> > reimplement 'convert_brightness' as 'convert_brightness_from_user' and
> > introduce 'convert_brightness_to_user'.
> >
> > Bug: https://bugzilla.kernel.org/show_bug.cgi?id=203905
> > Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1242
> > Cc: Alex Deucher 
> > Cc: Nicholas Kazlauskas 
> > Signed-off-by: Alexander Monakov 
> > ---
> > v2: split convert_brightness to &_from_user and &_to_user (Nicholas)
> >
> >  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 81 +--
> >  1 file changed, 40 insertions(+), 41 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> > b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> > index 710edc70e37e..b60a763f3f95 100644
> > --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> > +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> > @@ -2881,51 +2881,50 @@ static int set_backlight_via_aux(struct dc_link 
> > *link, uint32_t brightness)
> >   return rc ? 0 : 1;
> >  }
> >
> > -static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
> > -   const uint32_t user_brightness)
> > +static int get_brightness_range(const struct amdgpu_dm_backlight_caps 
> > *caps,
> > + unsigned *min, unsigned *max)
> >  {
> > - u32 min, max, conversion_pace;
> > - u32 brightness = user_brightness;
> > -
> >   if (!caps)
> > - goto out;
> > + return 0;
> >
> > - if (!caps->aux_support) {
> > - max = caps->max_input_signal;
> > - min = caps->min_input_signal;
> > - /*
> > -  * The brightness input is in the range 0-255
> > -  * It needs to be rescaled to be between the
> > -  * requested min and max input signal
> > -  * It also needs to be scaled up by 0x101 to
> > -  * match the DC interface which has a range of
> > -  * 0 to 0x
> > -  */
> > - conversion_pace = 0x101;
> > - brightness =
> > - user_brightness
> > - * conversion_pace
> > - * (max - min)
> > - / AMDGPU_MAX_BL_LEVEL
> > - + min * conversion_pace;
> > + if (caps->aux_support) {
> > + // Firmware limits are in nits, DC API wants millinits.
> > + *max = 1000 * caps->aux_max_input_signal;
> > + *min = 1000 * caps->aux_min_input_signal;
> >   } else {
> > - /* TODO
> > -  * We are doing a linear interpolation here, which is OK but
> > -  * does not provide the optimal result. We probably want
> > -  * something close to the Perceptual Quantizer (PQ) curve.
> > -  */
> > - max = caps->aux_max_input_signal;
> > - min = caps->aux_min_input_signal;
> > -
> > - brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
> > -+ user_brightness * max;
> > - // Multiple the value by 1000 since we use millinits
> > - brightness *= 1000;
> > - brightness = DIV_ROUND_CLOSEST(brightness, 
> > AMDGPU_MAX_BL_LEVEL);
> > + // Firmware limits are 8-bit, PWM control is 16-bit.
> > + *max = 0x101 * caps->max_input_signal;
> > + *min = 0x101 * caps->min_input_signal;
> >   }
> > + return 1;
> > +}
> >
> > -out:
> > - return brightness;
> > +static u32 convert_brightness_from_user(const struct 
> > amdgpu_dm_backlight_caps *caps,
> > + uint32_t brightness)
> > +{
> > + unsigned min, max;
> > +
> > + if (!get_brightness_range(caps, , ))
> > + return brightness;
> > +
> > + // Rescale 0..255 to min..max
> > + return min + DIV_ROUND_CLOSEST((max - min) * brightness,
> > +AMDGPU_MAX_BL_LEVEL);
> > +}
> > +
> > +static u32 convert_brightness_to_user(const struct 
> > 

Re: [PATCH] drm/amdkfd: Initialize SDMA activity counter to 0

2020-08-17 Thread Felix Kuehling
Am 2020-08-17 um 1:05 p.m. schrieb Mukul Joshi:
> To prevent reporting erroneous SDMA usage, initialize SDMA
> activity counter to 0 before using.
>
> Signed-off-by: Mukul Joshi 

Reviewed-by: Felix Kuehling 


> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 1 +
>  1 file changed, 1 insertion(+)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
> index 013c2b018edc..4480f905814c 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
> @@ -270,6 +270,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, 
> struct attribute *attr,
>   kfd_sdma_activity_worker);
>  
>   sdma_activity_work_handler.pdd = pdd;
> + sdma_activity_work_handler.sdma_activity_counter = 0;
>  
>   schedule_work(_activity_work_handler.sdma_activity_work);
>  
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdkfd: Initialize SDMA activity counter to 0

2020-08-17 Thread Mukul Joshi
To prevent reporting erroneous SDMA usage, initialize SDMA
activity counter to 0 before using.

Signed-off-by: Mukul Joshi 
---
 drivers/gpu/drm/amd/amdkfd/kfd_process.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 013c2b018edc..4480f905814c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -270,6 +270,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct 
attribute *attr,
kfd_sdma_activity_worker);
 
sdma_activity_work_handler.pdd = pdd;
+   sdma_activity_work_handler.sdma_activity_counter = 0;
 
schedule_work(_activity_work_handler.sdma_activity_work);
 
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 5.7 125/393] drm/amdgpu: use the unlocked drm_gem_object_put

2020-08-17 Thread Greg Kroah-Hartman
From: Emil Velikov 

[ Upstream commit 1a87f67a66de4ad0c0d79fd86b6c5273143387c3 ]

The driver does not hold struct_mutex, thus using the locked version of
the helper is incorrect.

Cc: Alex Deucher 
Cc: Christian König 
Cc: amd-gfx@lists.freedesktop.org
Fixes: a39414716ca0 ("drm/amdgpu: add independent DMA-buf import v9")
Signed-off-by: Emil Velikov 
Acked-by: Sam Ravnborg 
Reviewed-by: Christian König 
Acked-by: Thomas Zimmermann 
Link: 
https://patchwork.freedesktop.org/patch/msgid/20200515095118.2743122-8-emil.l.veli...@gmail.com
Signed-off-by: Sasha Levin 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index ffeb20f11c07c..728f76cc536ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -552,7 +552,7 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct 
drm_device *dev,
attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
_dma_buf_attach_ops, obj);
if (IS_ERR(attach)) {
-   drm_gem_object_put(obj);
+   drm_gem_object_put_unlocked(obj);
return ERR_CAST(attach);
}
 
-- 
2.25.1



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 5.8 143/464] drm/amdgpu: use the unlocked drm_gem_object_put

2020-08-17 Thread Greg Kroah-Hartman
From: Emil Velikov 

[ Upstream commit 1a87f67a66de4ad0c0d79fd86b6c5273143387c3 ]

The driver does not hold struct_mutex, thus using the locked version of
the helper is incorrect.

Cc: Alex Deucher 
Cc: Christian König 
Cc: amd-gfx@lists.freedesktop.org
Fixes: a39414716ca0 ("drm/amdgpu: add independent DMA-buf import v9")
Signed-off-by: Emil Velikov 
Acked-by: Sam Ravnborg 
Reviewed-by: Christian König 
Acked-by: Thomas Zimmermann 
Link: 
https://patchwork.freedesktop.org/patch/msgid/20200515095118.2743122-8-emil.l.veli...@gmail.com
Signed-off-by: Sasha Levin 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 43d8ed7dbd001..652c57a3b8478 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -587,7 +587,7 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct 
drm_device *dev,
attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
_dma_buf_attach_ops, obj);
if (IS_ERR(attach)) {
-   drm_gem_object_put(obj);
+   drm_gem_object_put_unlocked(obj);
return ERR_CAST(attach);
}
 
-- 
2.25.1



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/dp_mst: Don't return error code when crtc is null

2020-08-17 Thread Lyude Paul
Oh-just noticed this is also missing a CC for sta...@vger.kernel.org. I'll add
it before pushing but please make sure to follow the guidelines here when
submitting fixes, since otherwise they might not get backported automatically
to older kernels:

https://www.kernel.org/doc/html/latest/process/stable-kernel-rules.html

(you can ignore the "It cannot be bigger than 100 lines, with context." part,
as long as you're not trying to backport new functionality to stable and
you're actually fixing something they're pretty lenient about that rule)

On Mon, 2020-08-17 at 11:21 -0400, Lyude Paul wrote:
> Reviewed-by: Lyude Paul 
> 
> I will go ahead and push this to drm-misc-fixes, thanks!
> 
> On Fri, 2020-08-14 at 13:01 -0400, Bhawanpreet Lakha wrote:
> > [Why]
> > In certain cases the crtc can be NULL and returning -EINVAL causes
> > atomic check to fail when it shouldn't. This leads to valid
> > configurations failing because atomic check fails.
> > 
> > [How]
> > Don't early return if crtc is null
> > 
> > Signed-off-by: Bhawanpreet Lakha 
> > ---
> >  drivers/gpu/drm/drm_dp_mst_topology.c | 4 ++--
> >  1 file changed, 2 insertions(+), 2 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c
> > b/drivers/gpu/drm/drm_dp_mst_topology.c
> > index 70c4b7afed12..bc90a1485699 100644
> > --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> > +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> > @@ -5037,8 +5037,8 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct
> > drm_atomic_state *state, struct drm
> >  
> > crtc = conn_state->crtc;
> >  
> > -   if (WARN_ON(!crtc))
> > -   return -EINVAL;
> > +   if (!crtc)
> > +   continue;
> >  
> > if (!drm_dp_mst_dsc_aux_for_port(pos->port))
> > continue;
-- 
Cheers,
Lyude Paul (she/her)
Software Engineer at Red Hat

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/dp_mst: Don't return error code when crtc is null

2020-08-17 Thread Lyude Paul
Reviewed-by: Lyude Paul 

I will go ahead and push this to drm-misc-fixes, thanks!

On Fri, 2020-08-14 at 13:01 -0400, Bhawanpreet Lakha wrote:
> [Why]
> In certain cases the crtc can be NULL and returning -EINVAL causes
> atomic check to fail when it shouldn't. This leads to valid
> configurations failing because atomic check fails.
> 
> [How]
> Don't early return if crtc is null
> 
> Signed-off-by: Bhawanpreet Lakha 
> ---
>  drivers/gpu/drm/drm_dp_mst_topology.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c
> b/drivers/gpu/drm/drm_dp_mst_topology.c
> index 70c4b7afed12..bc90a1485699 100644
> --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> @@ -5037,8 +5037,8 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct
> drm_atomic_state *state, struct drm
>  
>   crtc = conn_state->crtc;
>  
> - if (WARN_ON(!crtc))
> - return -EINVAL;
> + if (!crtc)
> + continue;
>  
>   if (!drm_dp_mst_dsc_aux_for_port(pos->port))
>   continue;
-- 
Cheers,
Lyude Paul (she/her)
Software Engineer at Red Hat

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] Revert "drm/amdgpu: disable gfxoff for navy_flounder"

2020-08-17 Thread Deucher, Alexander
[AMD Public Use]

Acked-by: Alex Deucher 

From: amd-gfx  on behalf of Jiansong 
Chen 
Sent: Monday, August 17, 2020 10:45 AM
To: amd-gfx@lists.freedesktop.org 
Cc: Zhou1, Tao ; Feng, Kenneth ; Chen, 
Jiansong (Simon) 
Subject: [PATCH] Revert "drm/amdgpu: disable gfxoff for navy_flounder"

This reverts commit 6a72ad7e387c6fec821c230fda3460f79fc0f877.
Newly released sdma fw (51.52) provides a fix for the issue.
---
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index e87d43537013..e527be22a3d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3610,9 +3610,6 @@ static void gfx_v10_0_check_gfxoff_flag(struct 
amdgpu_device *adev)
 if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 break;
-   case CHIP_NAVY_FLOUNDER:
-   adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-   break;
 default:
 break;
 }
--
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Calexander.deucher%40amd.com%7Cfe56ae8b28a642d0860008d842bc43e4%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637332723663638132sdata=xMP4WmjGoryNjXtsOAaOe%2FmzdL%2FWe1BEvOJ0DOAUAWo%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] Revert "drm/amdgpu: disable gfxoff for navy_flounder"

2020-08-17 Thread Jiansong Chen
This reverts commit 6a72ad7e387c6fec821c230fda3460f79fc0f877.
Newly released sdma fw (51.52) provides a fix for the issue.
---
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index e87d43537013..e527be22a3d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3610,9 +3610,6 @@ static void gfx_v10_0_check_gfxoff_flag(struct 
amdgpu_device *adev)
if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
break;
-   case CHIP_NAVY_FLOUNDER:
-   adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-   break;
default:
break;
}
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/5] drm/amd/pm: disable/enable deep sleep features on UMD pstate enter/exit

2020-08-17 Thread Deucher, Alexander
[AMD Public Use]

You can probably just squash patches 2-5 into one patch.  Either way, series is:
Reviewed-by: Alex Deucher 



From: Quan, Evan 
Sent: Monday, August 17, 2020 4:29 AM
To: amd-gfx@lists.freedesktop.org 
Cc: Deucher, Alexander ; Quan, Evan 

Subject: [PATCH 1/5] drm/amd/pm: disable/enable deep sleep features on UMD 
pstate enter/exit

Add deep sleep disablement/enablement on UMD pstate entering/exiting.

Change-Id: I4fbc02bb4a390ab82293a5ff9c91f2a8beb0a3c9
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 1 +
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c   | 2 ++
 drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 1 +
 3 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 7cc707ec21c3..4c5c041af4ee 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -597,6 +597,7 @@ struct pptable_funcs {
 ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
 int (*enable_mgpu_fan_boost)(struct smu_context *smu);
 int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
+   int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
 };

 typedef enum {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c 
b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 221b5c923ce1..8eb5b92903cd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1441,6 +1441,7 @@ static int smu_enable_umd_pstate(void *handle,

AMD_IP_BLOCK_TYPE_GFX,

AMD_CG_STATE_UNGATE);
 smu_gfx_ulv_control(smu, false);
+   smu_deep_sleep_control(smu, false);
 }
 } else {
 /* exit umd pstate, restore level, enable gfx cg*/
@@ -1448,6 +1449,7 @@ static int smu_enable_umd_pstate(void *handle,
 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
 *level = smu_dpm_ctx->saved_dpm_level;
 smu_dpm_ctx->enable_umd_pstate = false;
+   smu_deep_sleep_control(smu, true);
 smu_gfx_ulv_control(smu, true);
 amdgpu_device_ip_set_clockgating_state(smu->adev,

AMD_IP_BLOCK_TYPE_GFX,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h 
b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 2fe29c6a00ce..c88f8fab1bae 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -93,6 +93,7 @@
 #define smu_get_pp_feature_mask(smu, buf)   
smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
 #define smu_set_pp_feature_mask(smu, new_mask)  
smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
 #define smu_gfx_ulv_control(smu, enablement)
smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement)
+#define smu_deep_sleep_control(smu, enablement)
smu_ppt_funcs(deep_sleep_control, 0, smu, enablement)

 #endif
 #endif
--
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/5] drm/amd/pm: disable/enable gfx ulv on UMD pstate enter/exit

2020-08-17 Thread Deucher, Alexander
[AMD Official Use Only - Internal Distribution Only]

You can probably just squash patches 2-5 into one patch.  Either way, series is:
Reviewed-by: Alex Deucher 


From: Quan, Evan 
Sent: Monday, August 17, 2020 3:49 AM
To: amd-gfx@lists.freedesktop.org 
Cc: Deucher, Alexander ; Quan, Evan 

Subject: [PATCH 1/5] drm/amd/pm: disable/enable gfx ulv on UMD pstate enter/exit

Add gfx ulv disablement/enablement on UMD pstate entering/exiting.

Change-Id: Ieb38fdb5975b563f24c0b172fedd01acf99afb10
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 1 +
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c   | 2 ++
 drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 1 +
 3 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index bbe4a343e9f1..7cc707ec21c3 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -596,6 +596,7 @@ struct pptable_funcs {
 int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
 ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
 int (*enable_mgpu_fan_boost)(struct smu_context *smu);
+   int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
 };

 typedef enum {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c 
b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 7d17c4f1b489..221b5c923ce1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1440,6 +1440,7 @@ static int smu_enable_umd_pstate(void *handle,
 amdgpu_device_ip_set_clockgating_state(smu->adev,

AMD_IP_BLOCK_TYPE_GFX,

AMD_CG_STATE_UNGATE);
+   smu_gfx_ulv_control(smu, false);
 }
 } else {
 /* exit umd pstate, restore level, enable gfx cg*/
@@ -1447,6 +1448,7 @@ static int smu_enable_umd_pstate(void *handle,
 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
 *level = smu_dpm_ctx->saved_dpm_level;
 smu_dpm_ctx->enable_umd_pstate = false;
+   smu_gfx_ulv_control(smu, true);
 amdgpu_device_ip_set_clockgating_state(smu->adev,

AMD_IP_BLOCK_TYPE_GFX,

AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h 
b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 264073d4e263..2fe29c6a00ce 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -92,6 +92,7 @@
 #define smu_get_asic_power_limits(smu)  
smu_ppt_funcs(get_power_limit, 0, smu)
 #define smu_get_pp_feature_mask(smu, buf)   
smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
 #define smu_set_pp_feature_mask(smu, new_mask)  
smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
+#define smu_gfx_ulv_control(smu, enablement)   
smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement)

 #endif
 #endif
--
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/2] drm/scheduler: Scheduler priority fixes (v2)

2020-08-17 Thread Christian König

Am 15.08.20 um 04:48 schrieb Luben Tuikov:

Remove DRM_SCHED_PRIORITY_LOW, as it was used
in only one place.

Rename and separate by a line
DRM_SCHED_PRIORITY_MAX to DRM_SCHED_PRIORITY_COUNT
as it represents a (total) count of said
priorities and it is used as such in loops
throughout the code. (0-based indexing is the
the count number.)

Remove redundant word HIGH in priority names,
and rename *KERNEL* to *HIGH*, as it really
means that, high.

v2: Add back KERNEL and remove SW and HW,
 in lieu of a single HIGH between NORMAL and KERNEL.

Signed-off-by: Luben Tuikov 


I can't really judge the difference between MAX and COUNT, but the we 
rename the values and get rid of the invalid one sounds like a good idea 
to me.


Reviewed-by: Christian König  for the series.


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   |  4 ++--
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c |  6 +++---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  2 +-
  drivers/gpu/drm/scheduler/sched_main.c|  4 ++--
  include/drm/gpu_scheduler.h   | 12 +++-
  8 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index d85d13f7a043..68eaa4f687a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] 
= {
  static int amdgpu_ctx_priority_permit(struct drm_file *filp,
  enum drm_sched_priority priority)
  {
-   if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+   if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
return -EINVAL;
  
  	/* NORMAL and below are accessible by everyone */

@@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
  static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum 
drm_sched_priority prio)
  {
switch (prio) {
-   case DRM_SCHED_PRIORITY_HIGH_HW:
+   case DRM_SCHED_PRIORITY_HIGH:
case DRM_SCHED_PRIORITY_KERNEL:
return AMDGPU_GFX_PIPE_PRIO_HIGH;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 75d37dfb51aa..bb9e5481ff3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct 
drm_gpu_scheduler *sched)
int i;
  
  	/* Signal all jobs not yet scheduled */

-   for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+   for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; 
i--) {
struct drm_sched_rq *rq = >sched_rq[i];
  
  		if (!rq)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 13ea8ebc421c..6d4fc79bf84a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct 
amdgpu_ring *ring,
>sched;
}
  
-	for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)

+   for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
atomic_set(>num_jobs[i], 0);
  
  	return 0;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index da871d84b742..7112137689db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -243,7 +243,7 @@ struct amdgpu_ring {
boolhas_compute_vm_bug;
boolno_scheduler;
  
-	atomic_t		num_jobs[DRM_SCHED_PRIORITY_MAX];

+   atomic_tnum_jobs[DRM_SCHED_PRIORITY_COUNT];
struct mutexpriority_mutex;
/* protected by priority_mutex */
int priority;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index c799691dfa84..17661ede9488 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -36,14 +36,14 @@ enum drm_sched_priority amdgpu_to_sched_priority(int 
amdgpu_priority)
  {
switch (amdgpu_priority) {
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
-   return DRM_SCHED_PRIORITY_HIGH_HW;
+   return DRM_SCHED_PRIORITY_HIGH;
case AMDGPU_CTX_PRIORITY_HIGH:
-   return DRM_SCHED_PRIORITY_HIGH_SW;
+   return DRM_SCHED_PRIORITY_HIGH;
case AMDGPU_CTX_PRIORITY_NORMAL:
return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
-   return 

Re: [PATCH v2] drm/amdgpu: add condition check for trace_amdgpu_cs()

2020-08-17 Thread Christian König

Am 17.08.20 um 12:34 schrieb Kevin Wang:

v1:
add trace event enabled check to avoid nop loop when submit multi ibs
in amdgpu_cs_ioctl() function.

v2:
add a new wrapper function to trace all amdgpu cs ibs.

Signed-off-by: Kevin Wang 


Reviewed-by: Christian König 


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16 +---
  1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ffbcaf4bfb8b..1921e61a37b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1275,13 +1275,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
return r;
  }
  
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)

+{
+   int i;
+
+   if (!trace_amdgpu_cs_enabled())
+   return;
+
+   for (i = 0; i < parser->job->num_ibs; i++)
+   trace_amdgpu_cs(parser, i);
+}
+
  int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  {
struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
-   int i, r;
+   int r;
  
  	if (amdgpu_ras_intr_triggered())

return -EHWPOISON;
@@ -1319,8 +1330,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, 
struct drm_file *filp)
  
  	reserved_buffers = true;
  
-	for (i = 0; i < parser.job->num_ibs; i++)

-   trace_amdgpu_cs(, i);
+   trace_amdgpu_cs_ibs();
  
  	r = amdgpu_cs_vm_handling();

if (r)


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH v2] drm/amdgpu: add condition check for trace_amdgpu_cs()

2020-08-17 Thread Kevin Wang
v1:
add trace event enabled check to avoid nop loop when submit multi ibs
in amdgpu_cs_ioctl() function.

v2:
add a new wrapper function to trace all amdgpu cs ibs.

Signed-off-by: Kevin Wang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16 +---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ffbcaf4bfb8b..1921e61a37b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1275,13 +1275,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
return r;
 }
 
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
+{
+   int i;
+
+   if (!trace_amdgpu_cs_enabled())
+   return;
+
+   for (i = 0; i < parser->job->num_ibs; i++)
+   trace_amdgpu_cs(parser, i);
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
-   int i, r;
+   int r;
 
if (amdgpu_ras_intr_triggered())
return -EHWPOISON;
@@ -1319,8 +1330,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, 
struct drm_file *filp)
 
reserved_buffers = true;
 
-   for (i = 0; i < parser.job->num_ibs; i++)
-   trace_amdgpu_cs(, i);
+   trace_amdgpu_cs_ibs();
 
r = amdgpu_cs_vm_handling();
if (r)
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: add condition check for trace_amdgpu_cs()

2020-08-17 Thread Christian König

Am 17.08.20 um 11:04 schrieb Kevin Wang:

add trace event enabled check to avoid nop loop when submit multi ibs
in amdgpu_cs_ioctl() function.


Maybe we should change the trace point instead to trace all IBs with 
just a single call.




Signed-off-by: Kevin Wang 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 5 +++--
  1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ffbcaf4bfb8b..409694f074ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1319,8 +1319,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, 
struct drm_file *filp)
  
  	reserved_buffers = true;
  
-	for (i = 0; i < parser.job->num_ibs; i++)

-   trace_amdgpu_cs(, i);
+   if (trace_amdgpu_cs_enabled())
+   for (i = 0; i < parser.job->num_ibs; i++)
+   trace_amdgpu_cs(, i);
  
  	r = amdgpu_cs_vm_handling();

if (r)


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: fix amdgpu_bo_release_notify() comment error

2020-08-17 Thread Christian König

Am 17.08.20 um 09:35 schrieb Kevin Wang:

fix amdgpu_bo_release_notify() comment error.

Signed-off-by: Kevin Wang 


Reviewed-by: Christian König 


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3d95b3edb635..4cb750ed6851 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1301,7 +1301,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
  }
  
  /**

- * amdgpu_bo_move_notify - notification about a BO being released
+ * amdgpu_bo_release_notify - notification about a BO being released
   * @bo: pointer to a buffer object
   *
   * Wipes VRAM buffers whose contents should not be leaked before the


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: add condition check for trace_amdgpu_cs()

2020-08-17 Thread Kevin Wang
add trace event enabled check to avoid nop loop when submit multi ibs
in amdgpu_cs_ioctl() function.

Signed-off-by: Kevin Wang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ffbcaf4bfb8b..409694f074ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1319,8 +1319,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, 
struct drm_file *filp)
 
reserved_buffers = true;
 
-   for (i = 0; i < parser.job->num_ibs; i++)
-   trace_amdgpu_cs(, i);
+   if (trace_amdgpu_cs_enabled())
+   for (i = 0; i < parser.job->num_ibs; i++)
+   trace_amdgpu_cs(, i);
 
r = amdgpu_cs_vm_handling();
if (r)
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: fix amdgpu_bo_release_notify() comment error

2020-08-17 Thread Nirmoy

Acked-by: Nirmoy Das 

On 8/17/20 9:35 AM, Kevin Wang wrote:

fix amdgpu_bo_release_notify() comment error.

Signed-off-by: Kevin Wang 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3d95b3edb635..4cb750ed6851 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1301,7 +1301,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
  }
  
  /**

- * amdgpu_bo_move_notify - notification about a BO being released
+ * amdgpu_bo_release_notify - notification about a BO being released
   * @bo: pointer to a buffer object
   *
   * Wipes VRAM buffers whose contents should not be leaked before the

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: fix amdgpu_bo_release_notify() comment error

2020-08-17 Thread Li, Dennis
[AMD Official Use Only - Internal Distribution Only]


Reviewed-by: Dennis Li 

-Original Message-
From: amd-gfx  On Behalf Of Kevin Wang
Sent: Monday, August 17, 2020 3:36 PM
To: amd-gfx@lists.freedesktop.org
Cc: Kuehling, Felix ; Wang, Kevin(Yang) 
; Koenig, Christian 
Subject: [PATCH] drm/amdgpu: fix amdgpu_bo_release_notify() comment error

fix amdgpu_bo_release_notify() comment error.

Signed-off-by: Kevin Wang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3d95b3edb635..4cb750ed6851 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1301,7 +1301,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,  
}
 
 /**
- * amdgpu_bo_move_notify - notification about a BO being released
+ * amdgpu_bo_release_notify - notification about a BO being released
  * @bo: pointer to a buffer object
  *
  * Wipes VRAM buffers whose contents should not be leaked before the
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7CDennis.Li%40amd.com%7C2c67e088110b4b1a4e9f08d8428033ca%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637332465681957370sdata=wbSpFkp1XpgMw7eogSPgplu8ySGiIGAFVRSdlD%2BtYHo%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 5/5] drm/amd/pm: widely share the logic for deep sleep control

2020-08-17 Thread Evan Quan
Considering the same logic can be applied to Arcturus, Navi1X
and Sienna Cichlid.

Change-Id: I9b80956fee5b094ea0e102601add6c02e3429719
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/pm/inc/smu_v11_0.h|  3 ++
 .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 35 +--
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 35 +--
 .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   | 35 +--
 .../gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c| 33 +
 5 files changed, 39 insertions(+), 102 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 89d70165ac44..1c9464826ff7 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -277,5 +277,8 @@ void smu_v11_0_init_gpu_metrics_v1_0(struct 
gpu_metrics_v1_0 *gpu_metrics);
 int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
  bool enablement);
 
+int smu_v11_0_deep_sleep_control(struct smu_context *smu,
+bool enablement);
+
 #endif
 #endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 81b584abeea2..8347b1f2509f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2313,39 +2313,6 @@ static ssize_t arcturus_get_gpu_metrics(struct 
smu_context *smu,
return sizeof(struct gpu_metrics_v1_0);
 }
 
-static int arcturus_deep_sleep_control(struct smu_context *smu,
-  bool enablement)
-{
-   struct amdgpu_device *adev = smu->adev;
-   int ret = 0;
-
-   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
-   ret = smu_cmn_feature_set_enabled(smu, 
SMU_FEATURE_DS_GFXCLK_BIT, enablement);
-   if (ret) {
-   dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", 
enablement ? "enable" : "disable");
-   return ret;
-   }
-   }
-
-   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
-   ret = smu_cmn_feature_set_enabled(smu, 
SMU_FEATURE_DS_SOCCLK_BIT, enablement);
-   if (ret) {
-   dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", 
enablement ? "enable" : "disable");
-   return ret;
-   }
-   }
-
-   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
-   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, 
enablement);
-   if (ret) {
-   dev_err(adev->dev, "Failed to %s LCLK DS!\n", 
enablement ? "enable" : "disable");
-   return ret;
-   }
-   }
-
-   return ret;
-}
-
 static const struct pptable_funcs arcturus_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
@@ -2425,7 +2392,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = arcturus_get_gpu_metrics,
.gfx_ulv_control = smu_v11_0_gfx_ulv_control,
-   .deep_sleep_control = arcturus_deep_sleep_control,
+   .deep_sleep_control = smu_v11_0_deep_sleep_control,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ddb693888d64..72f3d68691d8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2578,39 +2578,6 @@ static int navi10_enable_mgpu_fan_boost(struct 
smu_context *smu)
   NULL);
 }
 
-static int navi10_deep_sleep_control(struct smu_context *smu,
-bool enablement)
-{
-   struct amdgpu_device *adev = smu->adev;
-   int ret = 0;
-
-   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
-   ret = smu_cmn_feature_set_enabled(smu, 
SMU_FEATURE_DS_GFXCLK_BIT, enablement);
-   if (ret) {
-   dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", 
enablement ? "enable" : "disable");
-   return ret;
-   }
-   }
-
-   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
-   ret = smu_cmn_feature_set_enabled(smu, 
SMU_FEATURE_DS_SOCCLK_BIT, enablement);
-   if (ret) {
-   dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", 
enablement ? "enable" : "disable");
-   return ret;
-   }
-   }
-
-   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
-   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, 
enablement);
-   if (ret) {
- 

[PATCH 3/5] drm/amd/pm: add Arcturus deep sleep control interface

2020-08-17 Thread Evan Quan
This is needed for UMD pstate switch.

Change-Id: I8e753c682c29fe420167b14b23f526ea9b0db42b
Signed-off-by: Evan Quan 
---
 .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 34 +++
 1 file changed, 34 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index aab83b957246..81b584abeea2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2313,6 +2313,39 @@ static ssize_t arcturus_get_gpu_metrics(struct 
smu_context *smu,
return sizeof(struct gpu_metrics_v1_0);
 }
 
+static int arcturus_deep_sleep_control(struct smu_context *smu,
+  bool enablement)
+{
+   struct amdgpu_device *adev = smu->adev;
+   int ret = 0;
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
+   ret = smu_cmn_feature_set_enabled(smu, 
SMU_FEATURE_DS_GFXCLK_BIT, enablement);
+   if (ret) {
+   dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", 
enablement ? "enable" : "disable");
+   return ret;
+   }
+   }
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
+   ret = smu_cmn_feature_set_enabled(smu, 
SMU_FEATURE_DS_SOCCLK_BIT, enablement);
+   if (ret) {
+   dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", 
enablement ? "enable" : "disable");
+   return ret;
+   }
+   }
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
+   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, 
enablement);
+   if (ret) {
+   dev_err(adev->dev, "Failed to %s LCLK DS!\n", 
enablement ? "enable" : "disable");
+   return ret;
+   }
+   }
+
+   return ret;
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
@@ -2392,6 +2425,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = arcturus_get_gpu_metrics,
.gfx_ulv_control = smu_v11_0_gfx_ulv_control,
+   .deep_sleep_control = arcturus_deep_sleep_control,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
-- 
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/5] drm/amd/pm: disable/enable deep sleep features on UMD pstate enter/exit

2020-08-17 Thread Evan Quan
Add deep sleep disablement/enablement on UMD pstate entering/exiting.

Change-Id: I4fbc02bb4a390ab82293a5ff9c91f2a8beb0a3c9
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 1 +
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c   | 2 ++
 drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 1 +
 3 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 7cc707ec21c3..4c5c041af4ee 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -597,6 +597,7 @@ struct pptable_funcs {
ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
int (*enable_mgpu_fan_boost)(struct smu_context *smu);
int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
+   int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
 };
 
 typedef enum {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c 
b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 221b5c923ce1..8eb5b92903cd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1441,6 +1441,7 @@ static int smu_enable_umd_pstate(void *handle,
   
AMD_IP_BLOCK_TYPE_GFX,
   
AMD_CG_STATE_UNGATE);
smu_gfx_ulv_control(smu, false);
+   smu_deep_sleep_control(smu, false);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -1448,6 +1449,7 @@ static int smu_enable_umd_pstate(void *handle,
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
*level = smu_dpm_ctx->saved_dpm_level;
smu_dpm_ctx->enable_umd_pstate = false;
+   smu_deep_sleep_control(smu, true);
smu_gfx_ulv_control(smu, true);
amdgpu_device_ip_set_clockgating_state(smu->adev,
   
AMD_IP_BLOCK_TYPE_GFX,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h 
b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 2fe29c6a00ce..c88f8fab1bae 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -93,6 +93,7 @@
 #define smu_get_pp_feature_mask(smu, buf)  
smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
 #define smu_set_pp_feature_mask(smu, new_mask) 
smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
 #define smu_gfx_ulv_control(smu, enablement)   
smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement)
+#define smu_deep_sleep_control(smu, enablement)
smu_ppt_funcs(deep_sleep_control, 0, smu, enablement)
 
 #endif
 #endif
-- 
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/5] drm/amd/pm: add Navi1x deep sleep control interface

2020-08-17 Thread Evan Quan
This is needed for UMD pstate switch.

Change-Id: Icd5d207359b7b83c1dd689a41fd9b48f537cde9a
Signed-off-by: Evan Quan 
---
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 34 +++
 1 file changed, 34 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index c968f05533d9..ddb693888d64 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2578,6 +2578,39 @@ static int navi10_enable_mgpu_fan_boost(struct 
smu_context *smu)
   NULL);
 }
 
+static int navi10_deep_sleep_control(struct smu_context *smu,
+bool enablement)
+{
+   struct amdgpu_device *adev = smu->adev;
+   int ret = 0;
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
+   ret = smu_cmn_feature_set_enabled(smu, 
SMU_FEATURE_DS_GFXCLK_BIT, enablement);
+   if (ret) {
+   dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", 
enablement ? "enable" : "disable");
+   return ret;
+   }
+   }
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
+   ret = smu_cmn_feature_set_enabled(smu, 
SMU_FEATURE_DS_SOCCLK_BIT, enablement);
+   if (ret) {
+   dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", 
enablement ? "enable" : "disable");
+   return ret;
+   }
+   }
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
+   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, 
enablement);
+   if (ret) {
+   dev_err(adev->dev, "Failed to %s LCLK DS!\n", 
enablement ? "enable" : "disable");
+   return ret;
+   }
+   }
+
+   return ret;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
@@ -2661,6 +2694,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_gpu_metrics = navi10_get_gpu_metrics,
.enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost,
.gfx_ulv_control = smu_v11_0_gfx_ulv_control,
+   .deep_sleep_control = navi10_deep_sleep_control,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
-- 
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4/5] drm/amd/pm: add Sienna Cichlid deep sleep control interface

2020-08-17 Thread Evan Quan
This is needed for UMD pstate switch.

Change-Id: I97ee3af60f4a3e4c2f575ce8c8e1a2866ed37f02
Signed-off-by: Evan Quan 
---
 .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   | 34 +++
 1 file changed, 34 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 45b9defebd07..b2ad6a5f6728 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2718,6 +2718,39 @@ static int sienna_cichlid_enable_mgpu_fan_boost(struct 
smu_context *smu)
   NULL);
 }
 
+static int sienna_cichlid_deep_sleep_control(struct smu_context *smu,
+bool enablement)
+{
+   struct amdgpu_device *adev = smu->adev;
+   int ret = 0;
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
+   ret = smu_cmn_feature_set_enabled(smu, 
SMU_FEATURE_DS_GFXCLK_BIT, enablement);
+   if (ret) {
+   dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", 
enablement ? "enable" : "disable");
+   return ret;
+   }
+   }
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
+   ret = smu_cmn_feature_set_enabled(smu, 
SMU_FEATURE_DS_SOCCLK_BIT, enablement);
+   if (ret) {
+   dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", 
enablement ? "enable" : "disable");
+   return ret;
+   }
+   }
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
+   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, 
enablement);
+   if (ret) {
+   dev_err(adev->dev, "Failed to %s LCLK DS!\n", 
enablement ? "enable" : "disable");
+   return ret;
+   }
+   }
+
+   return ret;
+}
+
 static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -2797,6 +2830,7 @@ static const struct pptable_funcs 
sienna_cichlid_ppt_funcs = {
.get_gpu_metrics = sienna_cichlid_get_gpu_metrics,
.enable_mgpu_fan_boost = sienna_cichlid_enable_mgpu_fan_boost,
.gfx_ulv_control = smu_v11_0_gfx_ulv_control,
+   .deep_sleep_control = sienna_cichlid_deep_sleep_control,
 };
 
 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
-- 
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 5/5] drm/amd/pm: widely share the logic for gfx ulv control

2020-08-17 Thread Evan Quan
Considering the same logic can be applied to Arcturus, Navi1X
and Sienna Cichlid.

Change-Id: I16958d114afbb2433789ca350019fea9b50e1218
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/pm/inc/smu_v11_0.h  |  3 +++
 drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c   | 13 +
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 13 +
 .../gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 13 +
 drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c  | 11 +++
 5 files changed, 17 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 65363d56e3cc..89d70165ac44 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -274,5 +274,8 @@ int smu_v11_0_get_current_pcie_link_speed(struct 
smu_context *smu);
 
 void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics);
 
+int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement);
+
 #endif
 #endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index c82ef2872a50..aab83b957246 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2313,17 +2313,6 @@ static ssize_t arcturus_get_gpu_metrics(struct 
smu_context *smu,
return sizeof(struct gpu_metrics_v1_0);
 }
 
-static int arcturus_gfx_ulv_control(struct smu_context *smu,
-   bool enablement)
-{
-   int ret = 0;
-
-   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
-   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, 
enablement);
-
-   return ret;
-}
-
 static const struct pptable_funcs arcturus_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
@@ -2402,7 +2391,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = arcturus_get_gpu_metrics,
-   .gfx_ulv_control = arcturus_gfx_ulv_control,
+   .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index c10119f29904..c968f05533d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2578,17 +2578,6 @@ static int navi10_enable_mgpu_fan_boost(struct 
smu_context *smu)
   NULL);
 }
 
-static int navi10_gfx_ulv_control(struct smu_context *smu,
- bool enablement)
-{
-   int ret = 0;
-
-   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
-   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, 
enablement);
-
-   return ret;
-}
-
 static const struct pptable_funcs navi10_ppt_funcs = {
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
@@ -2671,7 +2660,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = navi10_get_gpu_metrics,
.enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost,
-   .gfx_ulv_control = navi10_gfx_ulv_control,
+   .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 3559b33da0c4..45b9defebd07 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2718,17 +2718,6 @@ static int sienna_cichlid_enable_mgpu_fan_boost(struct 
smu_context *smu)
   NULL);
 }
 
-static int sienna_cichlid_gfx_ulv_control(struct smu_context *smu,
- bool enablement)
-{
-   int ret = 0;
-
-   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
-   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, 
enablement);
-
-   return ret;
-}
-
 static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -2807,7 +2796,7 @@ static const struct pptable_funcs 
sienna_cichlid_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = sienna_cichlid_get_gpu_metrics,
.enable_mgpu_fan_boost = 

[PATCH 3/5] drm/amd/pm: add Arcturus gfx ulv control interface

2020-08-17 Thread Evan Quan
This is needed for UMD pstate switch.

Change-Id: I40e235add95d6abbf99186112673a411edf2bb39
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 12 
 1 file changed, 12 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 81f43fea4d52..c82ef2872a50 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2313,6 +2313,17 @@ static ssize_t arcturus_get_gpu_metrics(struct 
smu_context *smu,
return sizeof(struct gpu_metrics_v1_0);
 }
 
+static int arcturus_gfx_ulv_control(struct smu_context *smu,
+   bool enablement)
+{
+   int ret = 0;
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
+   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, 
enablement);
+
+   return ret;
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
@@ -2391,6 +2402,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = arcturus_get_gpu_metrics,
+   .gfx_ulv_control = arcturus_gfx_ulv_control,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
-- 
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/5] drm/amd/pm: disable/enable gfx ulv on UMD pstate enter/exit

2020-08-17 Thread Evan Quan
Add gfx ulv disablement/enablement on UMD pstate entering/exiting.

Change-Id: Ieb38fdb5975b563f24c0b172fedd01acf99afb10
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 1 +
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c   | 2 ++
 drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 1 +
 3 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index bbe4a343e9f1..7cc707ec21c3 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -596,6 +596,7 @@ struct pptable_funcs {
int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
int (*enable_mgpu_fan_boost)(struct smu_context *smu);
+   int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
 };
 
 typedef enum {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c 
b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 7d17c4f1b489..221b5c923ce1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1440,6 +1440,7 @@ static int smu_enable_umd_pstate(void *handle,
amdgpu_device_ip_set_clockgating_state(smu->adev,
   
AMD_IP_BLOCK_TYPE_GFX,
   
AMD_CG_STATE_UNGATE);
+   smu_gfx_ulv_control(smu, false);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -1447,6 +1448,7 @@ static int smu_enable_umd_pstate(void *handle,
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
*level = smu_dpm_ctx->saved_dpm_level;
smu_dpm_ctx->enable_umd_pstate = false;
+   smu_gfx_ulv_control(smu, true);
amdgpu_device_ip_set_clockgating_state(smu->adev,
   
AMD_IP_BLOCK_TYPE_GFX,
   
AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h 
b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 264073d4e263..2fe29c6a00ce 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -92,6 +92,7 @@
 #define smu_get_asic_power_limits(smu) 
smu_ppt_funcs(get_power_limit, 0, smu)
 #define smu_get_pp_feature_mask(smu, buf)  
smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
 #define smu_set_pp_feature_mask(smu, new_mask) 
smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
+#define smu_gfx_ulv_control(smu, enablement)   
smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement)
 
 #endif
 #endif
-- 
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/5] drm/amd/pm: add Navi1x gfx ulv control interface

2020-08-17 Thread Evan Quan
This is needed for UMD pstate switch.

Change-Id: Id14399d3a5e4b24bb8a72a298ec4e96717444741
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 12 
 1 file changed, 12 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 4b4d461899df..c10119f29904 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2578,6 +2578,17 @@ static int navi10_enable_mgpu_fan_boost(struct 
smu_context *smu)
   NULL);
 }
 
+static int navi10_gfx_ulv_control(struct smu_context *smu,
+ bool enablement)
+{
+   int ret = 0;
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
+   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, 
enablement);
+
+   return ret;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
@@ -2660,6 +2671,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = navi10_get_gpu_metrics,
.enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost,
+   .gfx_ulv_control = navi10_gfx_ulv_control,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
-- 
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4/5] drm/amd/pm: add Sienna Cichlid gfx ulv control interface

2020-08-17 Thread Evan Quan
This is needed for UMD pstate switch.

Change-Id: I23d68bc291960118c799366cbb3fc89ccb42f98c
Signed-off-by: Evan Quan 
---
 .../gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c  | 12 
 1 file changed, 12 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 407a11c2826b..3559b33da0c4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2718,6 +2718,17 @@ static int sienna_cichlid_enable_mgpu_fan_boost(struct 
smu_context *smu)
   NULL);
 }
 
+static int sienna_cichlid_gfx_ulv_control(struct smu_context *smu,
+ bool enablement)
+{
+   int ret = 0;
+
+   if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
+   ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, 
enablement);
+
+   return ret;
+}
+
 static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -2796,6 +2807,7 @@ static const struct pptable_funcs 
sienna_cichlid_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = sienna_cichlid_get_gpu_metrics,
.enable_mgpu_fan_boost = sienna_cichlid_enable_mgpu_fan_boost,
+   .gfx_ulv_control = sienna_cichlid_gfx_ulv_control,
 };
 
 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
-- 
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: fix amdgpu_bo_release_notify() comment error

2020-08-17 Thread Kevin Wang
fix amdgpu_bo_release_notify() comment error.

Signed-off-by: Kevin Wang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3d95b3edb635..4cb750ed6851 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1301,7 +1301,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_bo_move_notify - notification about a BO being released
+ * amdgpu_bo_release_notify - notification about a BO being released
  * @bo: pointer to a buffer object
  *
  * Wipes VRAM buffers whose contents should not be leaked before the
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/display: should check error using DC_OK

2020-08-17 Thread Tong Zhang
core_link_read_dpcd returns only DC_OK(1) and DC_ERROR_UNEXPECTED(-1),
so the caller should check for errors by comparing against DC_OK instead of checking against 0

Signed-off-by: Tong Zhang 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 5cb7b834e459..a60a457fcc8f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4376,9 +4376,9 @@ bool dc_link_get_backlight_level_nits(struct dc_link 
*link,
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
 
-   if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+   if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
dpcd_backlight_get.raw,
-   sizeof(union dpcd_source_backlight_get)))
+   sizeof(union dpcd_source_backlight_get)) != DC_OK)
return false;
 
*backlight_millinits_avg =
@@ -4417,9 +4417,9 @@ bool dc_link_read_default_bl_aux(struct dc_link *link, 
uint32_t *backlight_milli
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
 
-   if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+   if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *) backlight_millinits,
-   sizeof(uint32_t)))
+   sizeof(uint32_t)) != DC_OK)
return false;
 
return true;
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2] drm/amd/display: use correct scale for actual_brightness

2020-08-17 Thread Alexander Monakov
Ping.

On Tue, 4 Aug 2020, Alexander Monakov wrote:

> Documentation for sysfs backlight level interface requires that
> values in both 'brightness' and 'actual_brightness' files are
> interpreted to be in range from 0 to the value given in the
> 'max_brightness' file.
> 
> With amdgpu, max_brightness gives 255, and values written by the user
> into 'brightness' are internally rescaled to a wider range. However,
> reading from 'actual_brightness' gives the raw register value without
> inverse rescaling. This causes issues for various userspace tools such
> as PowerTop and systemd that expect the value to be in the correct
> range.
> 
> Introduce a helper to retrieve internal backlight range. Use it to
> reimplement 'convert_brightness' as 'convert_brightness_from_user' and
> introduce 'convert_brightness_to_user'.
> 
> Bug: https://bugzilla.kernel.org/show_bug.cgi?id=203905
> Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1242
> Cc: Alex Deucher 
> Cc: Nicholas Kazlauskas 
> Signed-off-by: Alexander Monakov 
> ---
> v2: split convert_brightness to &_from_user and &_to_user (Nicholas)
> 
>  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 81 +--
>  1 file changed, 40 insertions(+), 41 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 710edc70e37e..b60a763f3f95 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -2881,51 +2881,50 @@ static int set_backlight_via_aux(struct dc_link 
> *link, uint32_t brightness)
>   return rc ? 0 : 1;
>  }
>  
> -static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
> -   const uint32_t user_brightness)
> +static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
> + unsigned *min, unsigned *max)
>  {
> - u32 min, max, conversion_pace;
> - u32 brightness = user_brightness;
> -
>   if (!caps)
> - goto out;
> + return 0;
>  
> - if (!caps->aux_support) {
> - max = caps->max_input_signal;
> - min = caps->min_input_signal;
> - /*
> -  * The brightness input is in the range 0-255
> -  * It needs to be rescaled to be between the
> -  * requested min and max input signal
> -  * It also needs to be scaled up by 0x101 to
> -  * match the DC interface which has a range of
> -  * 0 to 0x
> -  */
> - conversion_pace = 0x101;
> - brightness =
> - user_brightness
> - * conversion_pace
> - * (max - min)
> - / AMDGPU_MAX_BL_LEVEL
> - + min * conversion_pace;
> + if (caps->aux_support) {
> + // Firmware limits are in nits, DC API wants millinits.
> + *max = 1000 * caps->aux_max_input_signal;
> + *min = 1000 * caps->aux_min_input_signal;
>   } else {
> - /* TODO
> -  * We are doing a linear interpolation here, which is OK but
> -  * does not provide the optimal result. We probably want
> -  * something close to the Perceptual Quantizer (PQ) curve.
> -  */
> - max = caps->aux_max_input_signal;
> - min = caps->aux_min_input_signal;
> -
> - brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
> -+ user_brightness * max;
> - // Multiple the value by 1000 since we use millinits
> - brightness *= 1000;
> - brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
> + // Firmware limits are 8-bit, PWM control is 16-bit.
> + *max = 0x101 * caps->max_input_signal;
> + *min = 0x101 * caps->min_input_signal;
>   }
> + return 1;
> +}
>  
> -out:
> - return brightness;
> +static u32 convert_brightness_from_user(const struct 
> amdgpu_dm_backlight_caps *caps,
> + uint32_t brightness)
> +{
> + unsigned min, max;
> +
> + if (!get_brightness_range(caps, , ))
> + return brightness;
> +
> + // Rescale 0..255 to min..max
> + return min + DIV_ROUND_CLOSEST((max - min) * brightness,
> +AMDGPU_MAX_BL_LEVEL);
> +}
> +
> +static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps 
> *caps,
> +   uint32_t brightness)
> +{
> + unsigned min, max;
> +
> + if (!get_brightness_range(caps, , ))
> + return brightness;
> +
> + if (brightness < min)
> + return 0;
> + // Rescale min..max to 0..255
> + return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
> +  max - min);
>  }

Re: TTM/nouveau conflict in drm-misc-next

2020-08-17 Thread Thomas Zimmermann
Hi

Am 14.08.20 um 18:21 schrieb Koenig, Christian:
> 
> 
> Am 14.08.2020 17:53 schrieb Alex Deucher :
> 
> On Fri, Aug 14, 2020 at 11:22 AM Christian König
>  wrote:
> >
> > Hey Thomas & Alex,
> >
> > well the TTM and Nouveau changes look good to me, but this completely
> > broke amdgpu.
> >
> > Alex any idea what is going on here?
> 
> What's broken in amdgpu?  There shouldn't be any ttm changes in amdgpu
> for drm-next.  Those all go through drm-misc.
> 
> 
> It's not a TTM change.
> 
> The backmerge of drm-next into drm-misc-next broke amdgpu so that even
> glxgears doesn't work anymore.
> 
> But each individual merge head still works fine as far as I can say.
> 
> Any idea how to track that down?

The backmerge brought in

  Fixes: 16e6eea29d7b ("Merge tag 'amd-drm-fixes-5.9-2020-08-07' ...)

which has quite a few commits. Maybe it's in one of them.

Best regards
Thomas

> 
> Christian.
> 
> 
> Alex
> 
> >
> > Regards,
> > Christian.
> >
> > Am 12.08.20 um 21:10 schrieb Thomas Zimmermann:
> > > Hi Christian and Ben,
> > >
> > > I backmerged drm-next into drm-misc-next and had a conflict between 
> ttm
> > > and nouveau. struct ttm_mem_res got renamed to struct ttm_resource. I
> > > updated nouveau to the new name, test-built, and pushed the result to
> > > drm-misc-next. If either of you has a minute, you may want to double
> > > check the merge.
> > >
> > > Best regards
> > > Thomas
> > >
> >
> > ___
> > amd-gfx mailing list
> > amd-gfx@lists.freedesktop.org
> > 
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cchristian.koenig%40amd.com%7Ca1aefc1ee22a4e733df908d8406a395c%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637330172275088649sdata=X2ZJUETwoq884Xtg66sDudjXB%2F3s%2BgRglnh33gpU4Hc%3Dreserved=0
> 
> 

-- 
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Maxfeldstr. 5, 90409 Nürnberg, Germany
(HRB 36809, AG Nürnberg)
Geschäftsführer: Felix Imendörffer



signature.asc
Description: OpenPGP digital signature
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 3/7] drm/amd/display: Avoid using unvalidated tiling_flags and tmz_surface in prepare_planes

2020-08-17 Thread Marek Olšák
On Wed, Aug 12, 2020 at 9:54 AM Daniel Vetter  wrote:

> On Tue, Aug 11, 2020 at 09:42:11AM -0400, Marek Olšák wrote:
> > There are a few cases when the flags can change, for example DCC can be
> > disabled due to a hw limitation in the 3d engine. Modifiers give the
> > misleading impression that they help with that, but they don't. They
> don't
> > really help with anything.
>
> But if that happens, how do you tell the other side that it needs to
> sample new flags? Does that just happen all the time?
>
> Also do the DCC state changes happen for shared buffers too?
>

I thought we were only talking about shared buffers.

If the other side is only a consumer and the producer must disable DCC, the
producer decompresses DCC and then disables it and updates the BO flags.
The consumer doesn't need the new flags, because even if DCC stays enabled
in the consumer, it's in a decompressed state (it has no effect). Only the
producer knows it's disabled, and any new consumer will also know it when
it queries the latest BO flags.

It doesn't work if both sides use writes, because it's not communicated
that DCC is disabled (BO flags are queried only once). This hasn't been a
problem so far.

Is there a way to disable DCC correctly and safely across processes? Yes.
So why don't we do it? Because it would add more GPU overhead.

Marek
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx