RE: [PATCH] drm/amdgpu: Simplify amdgpu_lockup_timeout usage.

2017-12-13 Thread Liu, Monk
Oops, sorry — I took a wrong look at the patch.

KIQ still goes directly.

-Original Message-
From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Liu, 
Monk
Sent: 2017年12月14日 15:34
To: Grodzovsky, Andrey ; 
amd-gfx@lists.freedesktop.org; Koenig, Christian 
Cc: Grodzovsky, Andrey ; mar...@gmail.com
Subject: RE: [PATCH] drm/amdgpu: Simplify amdgpu_lockup_timeout usage.

-   timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
-   if (timeout == 0) {
-   /*
-* FIXME:
-* Delayed workqueue cannot use it directly,
-* so the scheduler will not use delayed workqueue if
-* MAX_SCHEDULE_TIMEOUT is set.
-* Currently keep it simple and silly.
-*/
-   timeout = MAX_SCHEDULE_TIMEOUT;
-   }
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
   num_hw_submission, amdgpu_job_hang_limit,
-  timeout, ring->name);
+  msecs_to_jiffies(amdgpu_lockup_timeout), 
ring->name);


What's the background for above change ? why use scheduler for KIQ ?

-Original Message-
From: Andrey Grodzovsky [mailto:andrey.grodzov...@amd.com]
Sent: 2017年12月14日 3:45
To: amd-gfx@lists.freedesktop.org; Koenig, Christian 
Cc: mar...@gmail.com; Liu, Monk ; Grodzovsky, Andrey 

Subject: [PATCH] drm/amdgpu: Simplify amdgpu_lockup_timeout usage.

With introduction of amdgpu_gpu_recovery we don't need any more to rely on 
amdgpu_lockup_timeout == 0 for disabling GPU reset.

Signed-off-by: Andrey Grodzovsky 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  7 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c|  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 14 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c  |  2 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c  |  2 +-
 5 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a074502..98fb9f9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1229,6 +1229,11 @@ static void amdgpu_check_arguments(struct amdgpu_device 
*adev)
 amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
}
+
+   if (amdgpu_lockup_timeout == 0) {
+   dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 
1\n");
+   amdgpu_lockup_timeout = 1;
+   }
 }
 
 /**
@@ -2831,7 +2836,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return false;
 
-   return amdgpu_lockup_timeout > 0 ? true : false;
+   return amdgpu_gpu_recovery;
 }
 
 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, diff 
--git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b734cd6..1fc5499 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;  int amdgpu_hw_i2c = 0;  int 
amdgpu_pcie_gen2 = -1;  int amdgpu_msi = -1; -int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 1;
 int amdgpu_dpm = -1;
 int amdgpu_fw_load_type = -1;
 int amdgpu_aspm = -1;
@@ -166,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444); 
 MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");  
module_param_named(msi, amdgpu_msi, int, 0444);
 
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = 
disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 
+1)");
 module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
 
 MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 854baf0..9484aed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -410,7 +410,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring 
*ring,  int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
  unsigned num_hw_submission)
 {
-   long timeout;
int r;
 
/* Check that num_hw_submission is a power of two */ @@ -434,20 +433,9 
@@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
-   timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
-   if (timeout == 0) {
-   /*
-* FIXME:
-* Delayed workqueue cannot use it dire

RE: [PATCH] drm/amdgpu: Simplify amdgpu_lockup_timeout usage.

2017-12-13 Thread Liu, Monk
-   timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
-   if (timeout == 0) {
-   /*
-* FIXME:
-* Delayed workqueue cannot use it directly,
-* so the scheduler will not use delayed workqueue if
-* MAX_SCHEDULE_TIMEOUT is set.
-* Currently keep it simple and silly.
-*/
-   timeout = MAX_SCHEDULE_TIMEOUT;
-   }
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
   num_hw_submission, amdgpu_job_hang_limit,
-  timeout, ring->name);
+  msecs_to_jiffies(amdgpu_lockup_timeout), 
ring->name);


What's the background for above change ? why use scheduler for KIQ ?

-Original Message-
From: Andrey Grodzovsky [mailto:andrey.grodzov...@amd.com] 
Sent: 2017年12月14日 3:45
To: amd-gfx@lists.freedesktop.org; Koenig, Christian 
Cc: mar...@gmail.com; Liu, Monk ; Grodzovsky, Andrey 

Subject: [PATCH] drm/amdgpu: Simplify amdgpu_lockup_timeout usage.

With introduction of amdgpu_gpu_recovery we don't need any more to rely on 
amdgpu_lockup_timeout == 0 for disabling GPU reset.

Signed-off-by: Andrey Grodzovsky 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  7 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c|  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 14 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c  |  2 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c  |  2 +-
 5 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a074502..98fb9f9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1229,6 +1229,11 @@ static void amdgpu_check_arguments(struct amdgpu_device 
*adev)
 amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
}
+
+   if (amdgpu_lockup_timeout == 0) {
+   dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 
1\n");
+   amdgpu_lockup_timeout = 1;
+   }
 }
 
 /**
@@ -2831,7 +2836,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return false;
 
-   return amdgpu_lockup_timeout > 0 ? true : false;
+   return amdgpu_gpu_recovery;
 }
 
 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, diff 
--git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b734cd6..1fc5499 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;  int amdgpu_hw_i2c = 0;  int 
amdgpu_pcie_gen2 = -1;  int amdgpu_msi = -1; -int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 1;
 int amdgpu_dpm = -1;
 int amdgpu_fw_load_type = -1;
 int amdgpu_aspm = -1;
@@ -166,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444); 
 MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");  
module_param_named(msi, amdgpu_msi, int, 0444);
 
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = 
disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 
+1)");
 module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
 
 MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 854baf0..9484aed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -410,7 +410,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring 
*ring,  int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
  unsigned num_hw_submission)
 {
-   long timeout;
int r;
 
/* Check that num_hw_submission is a power of two */ @@ -434,20 +433,9 
@@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
-   timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
-   if (timeout == 0) {
-   /*
-* FIXME:
-* Delayed workqueue cannot use it directly,
-* so the scheduler will not use delayed workqueue if
-* MAX_SCHEDULE_TIMEOUT is set.
-* Currently keep it simple and silly.
-*/
-   timeout = MAX_SCHEDULE_TIMEOUT;
-   }
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,

Re: [PATCH 2/2] drm/ttm: completely rework ttm_bo_delayed_delete

2017-12-13 Thread Thomas Hellstrom

On 12/13/2017 09:55 PM, Thomas Hellstrom wrote:

Hi, Christian,

While this has probably already been committed, and looks like a nice 
cleanup there are two things below I think needs fixing.


On 11/15/2017 01:31 PM, Christian König wrote:

There is no guarantee that the next entry on the ddelete list stays on
the list when we drop the locks.

Completely rework this mess by moving processed entries on a temporary
list.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/ttm/ttm_bo.c | 77 
++--

  1 file changed, 25 insertions(+), 52 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7c1eac4f4b4b..ad0afdd71f21 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -572,71 +572,47 @@ static int ttm_bo_cleanup_refs(struct 
ttm_buffer_object *bo,

   * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
   * encountered buffers.
   */
-
-static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool 
remove_all)
+static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool 
remove_all)

  {
  struct ttm_bo_global *glob = bdev->glob;
-    struct ttm_buffer_object *entry = NULL;
-    int ret = 0;
-
-    spin_lock(&glob->lru_lock);
-    if (list_empty(&bdev->ddestroy))
-    goto out_unlock;
+    struct list_head removed;
+    bool empty;
  -    entry = list_first_entry(&bdev->ddestroy,
-    struct ttm_buffer_object, ddestroy);
-    kref_get(&entry->list_kref);
+    INIT_LIST_HEAD(&removed);
  -    for (;;) {
-    struct ttm_buffer_object *nentry = NULL;
-
-    if (entry->ddestroy.next != &bdev->ddestroy) {
-    nentry = list_first_entry(&entry->ddestroy,
-    struct ttm_buffer_object, ddestroy);
-    kref_get(&nentry->list_kref);
-    }
-
-    ret = reservation_object_trylock(entry->resv) ? 0 : -EBUSY;
-    if (remove_all && ret) {
-    spin_unlock(&glob->lru_lock);
-    ret = reservation_object_lock(entry->resv, NULL);
-    spin_lock(&glob->lru_lock);
-    }
+    spin_lock(&glob->lru_lock);
+    while (!list_empty(&bdev->ddestroy)) {
+    struct ttm_buffer_object *bo;
  -    if (!ret)
-    ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
-  true);
-    else
-    spin_unlock(&glob->lru_lock);
+    bo = list_first_entry(&bdev->ddestroy, struct 
ttm_buffer_object,

+  ddestroy);
+    kref_get(&bo->list_kref);
+    list_move_tail(&bo->ddestroy, &removed);
+    spin_unlock(&glob->lru_lock);
  -    kref_put(&entry->list_kref, ttm_bo_release_list);
-    entry = nentry;
+    reservation_object_lock(bo->resv, NULL);


Reservation may be a long lived lock, and typically if the object is 
reserved here, it's being evicted somewhere and there might be a 
substantial stall, which isn't really acceptable in the global 
workqueue. Better to move on to the next bo.
This function was really intended to be non-blocking, unless 
remove_all == true. I even think it's safe to keep the spinlock held 
on tryreserve?



  -    if (ret || !entry)
-    goto out;
+    spin_lock(&glob->lru_lock);
+    ttm_bo_cleanup_refs(bo, false, !remove_all, true);
  +    kref_put(&bo->list_kref, ttm_bo_release_list);


Calling a release function in atomic context is a bad thing. Nobody 
knows what locks needs to be taken in the release function and such 
code is prone to lock inversion and sleep-while-atomic bugs. Not long 
ago vfree() was even forbidden from atomic context. But here it's 
easily avoidable.


Hmm. It actually looks like ttm_bo_cleanup_refs unlocks the 
glob->lru_lock just loke ttm_bo_cleanup_refs_and_unlock did, so my 
latter comment actually isn't correct. Intuitively removing the "unlock" 
prefix from the function would also mean that the unlocking 
functionality went away, but that doesn't seem to be the case. Also the 
commit message "needed for the next patch" isn't very helpful when the 
next patch is actually commited much later...


The first comment about trylocking still holds, though.

/Thomas





/Thomas


___
dri-devel mailing list
dri-de...@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: Add gpu_recovery parameter

2017-12-13 Thread Liu, Monk
> Problem with this is that amdgpu_check_soft_reset will not be called, this 
> function which prints which IP block was hung even when later we opt not to 
> recover.
I suggest instead to add a bool force_reset parameter to amdgpu_gpu_recover 
which will override amdgpu_gpu_recovery and we can set it to true from 
amdgpu_debugfs_gpu_recover only.

[ML] Why do you need "check_soft_reset" to be called? I think soft-reset checking is 
totally useless... because with the TDR feature, the only thing that can 
tell you whether the GPU hung is the timeout warning.

For soft-reset checking, it only shows you whether some IP is busy or not, but busy does 
not prove the engine is hung — it may just be busy.


BR Monk

-Original Message-
From: Grodzovsky, Andrey 
Sent: 2017年12月13日 20:53
To: Koenig, Christian ; amd-gfx@lists.freedesktop.org
Cc: Liu, Monk ; mar...@gmail.com
Subject: Re: [PATCH] drm/amdgpu: Add gpu_recovery parameter



On 12/13/2017 07:20 AM, Christian König wrote:
> Am 12.12.2017 um 20:16 schrieb Andrey Grodzovsky:
>> Add new parameter to control GPU recovery procedure.
>> Retire old way of disabling GPU recovery by setting lockup_timeout ==
>> 0 and
>> set default for lockup_timeout to 10s.
>>
>> Signed-off-by: Andrey Grodzovsky 
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h    | 1 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c    | 8 ++--
>>   3 files changed, 12 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index 3735500..26abe03 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -126,6 +126,7 @@ extern int amdgpu_param_buf_per_se;
>>   extern int amdgpu_job_hang_limit;
>>   extern int amdgpu_lbpw;
>>   extern int amdgpu_compute_multipipe;
>> +extern int amdgpu_gpu_recovery;
>>     #ifdef CONFIG_DRM_AMDGPU_SI
>>   extern int amdgpu_si_support;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 8d03baa..d84b57a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -3030,6 +3030,11 @@ int amdgpu_gpu_recover(struct amdgpu_device 
>> *adev, struct amdgpu_job *job)
>>   return 0;
>>   }
>>   +    if (!amdgpu_gpu_recovery) {
>> +    DRM_INFO("GPU recovery disabled.\n");
>> +    return 0;
>> +    }
>> +
>
> Please move this check into the caller of amdgpu_gpu_recover().
>
> This way we can still trigger a GPU recovery manually or from the 
> hypervisor under SRIOV.
>
> Christian.

Problem with this is that amdgpu_check_soft_reset will not be called, this 
function which prints which IP block was hung even when later we opt not to 
recover.
I suggest instead to add a bool force_reset parameter to amdgpu_gpu_recover 
which will override amdgpu_gpu_recovery and we can set it to true from 
amdgpu_debugfs_gpu_recover only.

Thanks,
Andrey

>
>>   dev_info(adev->dev, "GPU reset begin!\n");
>>     mutex_lock(&adev->lock_reset); diff --git 
>> a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> index 0b039bd..5c612e9 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> @@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;
>>   int amdgpu_hw_i2c = 0;
>>   int amdgpu_pcie_gen2 = -1;
>>   int amdgpu_msi = -1;
>> -int amdgpu_lockup_timeout = 0;
>> +int amdgpu_lockup_timeout = 1;
>>   int amdgpu_dpm = -1;
>>   int amdgpu_fw_load_type = -1;
>>   int amdgpu_aspm = -1;
>> @@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0;
>>   int amdgpu_job_hang_limit = 0;
>>   int amdgpu_lbpw = -1;
>>   int amdgpu_compute_multipipe = -1;
>> +int amdgpu_gpu_recovery = 1;
>>     MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in 
>> megabytes");
>>   module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ 
>> -165,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 
>> 0444);
>>   MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = 
>> auto)");
>>   module_param_named(msi, amdgpu_msi, int, 0444);
>>   -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms 
>> (default 0 = disable)");
>> +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default
>> 1)");
>>   module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 
>> 0444);
>>     MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = 
>> auto)"); @@ -280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, 
>> int, 0444);
>>   MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be 
>> spread across pipes (1 = enable, 0 = disable, -1 = auto)");
>>   module_param_named(compute_multipipe, amdgpu_compute_multipipe, 
>> int, 0444);
>>   +MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 
>> = enable (default) , 0 = disable");
>> +module_param_named(gpu_recovery, amdgpu_gpu

RE: [PATCH] drm/amdgpu: Add gpu_recovery parameter

2017-12-13 Thread Liu, Monk
Andrey

Your patch looks like it breaks the logic for SRIOV; please check the function 
"xgpu_ai_mailbox_flr_work".
This function manually triggers GPU_RECOVER by the will of hypervisor.

Your check of :
+   if (!amdgpu_gpu_recovery) {
+   DRM_INFO("GPU recovery disabled.\n");
+   return 0;
+   }

Actually breaks the SRIOV logic 

I have two idea:
1) Please change to: if (!amdgpu_gpu_recovery && !amdgpu_sriov_vf(adev))
2) please add another parameter "force_gpu_recover", and set it to true in 
driver init stage, and 
  In your check, you can change to: if(!amdgpu_gpu_recovery && 
!force_gpu_recover)

BR Monk

-Original Message-
From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of 
Andrey Grodzovsky
Sent: 2017年12月13日 3:16
To: amd-gfx@lists.freedesktop.org; Koenig, Christian 
Cc: Grodzovsky, Andrey ; Liu, Monk 
; mar...@gmail.com
Subject: [PATCH] drm/amdgpu: Add gpu_recovery parameter

Add new parameter to control GPU recovery procedure.
Retire old way of disabling GPU recovery by setting lockup_timeout == 0 and set 
default for lockup_timeout to 10s.

Signed-off-by: Andrey Grodzovsky 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h| 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c| 8 ++--
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3735500..26abe03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -126,6 +126,7 @@ extern int amdgpu_param_buf_per_se;  extern int 
amdgpu_job_hang_limit;  extern int amdgpu_lbpw;  extern int 
amdgpu_compute_multipipe;
+extern int amdgpu_gpu_recovery;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8d03baa..d84b57a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3030,6 +3030,11 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, 
struct amdgpu_job *job)
return 0;
}
 
+   if (!amdgpu_gpu_recovery) {
+   DRM_INFO("GPU recovery disabled.\n");
+   return 0;
+   }
+
dev_info(adev->dev, "GPU reset begin!\n");
 
mutex_lock(&adev->lock_reset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0b039bd..5c612e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;  int amdgpu_hw_i2c = 0;  int 
amdgpu_pcie_gen2 = -1;  int amdgpu_msi = -1; -int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 1;
 int amdgpu_dpm = -1;
 int amdgpu_fw_load_type = -1;
 int amdgpu_aspm = -1;
@@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0;  int amdgpu_job_hang_limit 
= 0;  int amdgpu_lbpw = -1;  int amdgpu_compute_multipipe = -1;
+int amdgpu_gpu_recovery = 1;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");  
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -165,7 +166,7 
@@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);  
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");  
module_param_named(msi, amdgpu_msi, int, 0444);
 
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = 
disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 
+1)");
 module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
 
 MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); @@ 
-280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);  
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across 
pipes (1 = enable, 0 = disable, -1 = auto)");  
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
 
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = 
+enable (default) , 0 = disable"); module_param_named(gpu_recovery, 
+amdgpu_gpu_recovery, int, 0444);
+
 #ifdef CONFIG_DRM_AMDGPU_SI
 
 #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: add enumerate for PDB/PTB

2017-12-13 Thread Chunming Zhou



On 2017年12月14日 02:33, Christian König wrote:

Am 13.12.2017 um 08:19 schrieb Chunming Zhou:

Change-Id: Ic1f39d3bc853e9e4259d3e03a22920eda822eec5
Signed-off-by: Chunming Zhou 


You dropped reversing the ordering and replaced that with noting the 
root level separately? Nifty idea.

Yes.


Just please drop AMDGPU_VM_SUBPTB, translate further is something we 
hopefully will only use the first and last time for Raven.

dropped, please review the v2.



So I would like to keep that completely transparent to the VM code and 
do the patching in the GMC specific implementation for Raven.

go ahead, you can send your 2+1 patch again based on enumerate.

Regards,
David Zhou


Christian.


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 69 
+-

  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 14 +++
  2 files changed, 66 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 709587d8a77f..fc858ddf9319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -148,12 +148,29 @@ struct amdgpu_prt_cb {
  static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
    unsigned level)
  {
-    if (level != adev->vm_manager.num_level)
-    return 9 * (adev->vm_manager.num_level - level - 1) +
+    unsigned shift = 0xff;
+
+    switch (level) {
+    case AMDGPU_VM_PDB2:
+    case AMDGPU_VM_PDB1:
+    case AMDGPU_VM_PDB0:
+    shift = 9 * (adev->vm_manager.last_level - level - 1) +
  adev->vm_manager.block_size;
-    else
-    /* For the page tables on the leaves */
-    return 0;
+    break;
+    case AMDGPU_VM_PTB:
+    if (adev->vm_manager.last_level == AMDGPU_VM_PTB)
+    shift = 0;
+    else
+    shift = adev->vm_manager.block_size;
+    break;
+    case AMDGPU_VM_SUBPTB:
+    shift = 0;
+    break;
+    default:
+    dev_err(adev->dev, "the level%d isn't supported.\n", level);
+    }
+
+    return shift;
  }
    /**
@@ -166,12 +183,13 @@ static unsigned amdgpu_vm_level_shift(struct 
amdgpu_device *adev,

  static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
    unsigned level)
  {
-    unsigned shift = amdgpu_vm_level_shift(adev, 0);
+    unsigned shift = amdgpu_vm_level_shift(adev,
+   adev->vm_manager.root_level);
  -    if (level == 0)
+    if (level == adev->vm_manager.root_level)
  /* For the root directory */
  return round_up(adev->vm_manager.max_pfn, 1 << shift) >> 
shift;

-    else if (level != adev->vm_manager.num_level)
+    else if (level != adev->vm_manager.last_level)
  /* Everything in between */
  return 512;
  else
@@ -385,7 +403,7 @@ static int amdgpu_vm_alloc_levels(struct 
amdgpu_device *adev,

  spin_unlock(&vm->status_lock);
  }
  -    if (level < adev->vm_manager.num_level) {
+    if (level < adev->vm_manager.last_level) {
  uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
  uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
  ((1 << shift) - 1);
@@ -431,7 +449,8 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
  saddr /= AMDGPU_GPU_PAGE_SIZE;
  eaddr /= AMDGPU_GPU_PAGE_SIZE;
  -    return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, 
eaddr, 0);

+    return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
+  adev->vm_manager.root_level);
  }
    /**
@@ -1247,7 +1266,8 @@ int amdgpu_vm_update_directories(struct 
amdgpu_device *adev,

  return 0;
    error:
-    amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
+    amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+   adev->vm_manager.root_level);
  amdgpu_job_free(job);
  return r;
  }
@@ -1266,7 +1286,7 @@ void amdgpu_vm_get_entry(struct 
amdgpu_pte_update_params *p, uint64_t addr,

   struct amdgpu_vm_pt **entry,
   struct amdgpu_vm_pt **parent)
  {
-    unsigned level = 0;
+    unsigned level = p->adev->vm_manager.root_level;
    *parent = NULL;
  *entry = &p->vm->root;
@@ -1278,7 +1298,7 @@ void amdgpu_vm_get_entry(struct 
amdgpu_pte_update_params *p, uint64_t addr,

  addr &= (1ULL << shift) - 1;
  }
  -    if (level != p->adev->vm_manager.num_level)
+    if (level != p->adev->vm_manager.last_level)
  *entry = NULL;
  }
  @@ -1320,7 +1340,7 @@ static void 
amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,

  return;
  entry->huge = !!(flags & AMDGPU_PDE_PTE);
  -    amdgpu_gart_get_vm_pde(p->adev, p->adev->vm_manager.num_level 
- 1,

+    amdgpu_gart_get_vm_pde(p->adev, p->adev->vm_manager.last_level - 1,
 &dst, &flags);
    if (use_cpu_update) {
@@ -1636,7 +1656,8 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,

    error_free:
  amd

[PATCH] drm/amdgpu: add enumerate for PDB/PTB v2

2017-12-13 Thread Chunming Zhou
v2:
  remove SUBPTB member

Change-Id: Ic1f39d3bc853e9e4259d3e03a22920eda822eec5
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 70 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 12 ++
 2 files changed, 63 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 709587d8a77f..7e4a78179296 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -148,12 +148,23 @@ struct amdgpu_prt_cb {
 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
  unsigned level)
 {
-   if (level != adev->vm_manager.num_level)
-   return 9 * (adev->vm_manager.num_level - level - 1) +
+   unsigned shift = 0xff;
+
+   switch (level) {
+   case AMDGPU_VM_PDB2:
+   case AMDGPU_VM_PDB1:
+   case AMDGPU_VM_PDB0:
+   shift = 9 * (adev->vm_manager.last_level - level - 1) +
adev->vm_manager.block_size;
-   else
-   /* For the page tables on the leaves */
-   return 0;
+   break;
+   case AMDGPU_VM_PTB:
+   shift = 0;
+   break;
+   default:
+   dev_err(adev->dev, "the level%d isn't supported.\n", level);
+   }
+
+   return shift;
 }
 
 /**
@@ -166,12 +177,13 @@ static unsigned amdgpu_vm_level_shift(struct 
amdgpu_device *adev,
 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
  unsigned level)
 {
-   unsigned shift = amdgpu_vm_level_shift(adev, 0);
+   unsigned shift = amdgpu_vm_level_shift(adev,
+  adev->vm_manager.root_level);
 
-   if (level == 0)
+   if (level == adev->vm_manager.root_level)
/* For the root directory */
return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
-   else if (level != adev->vm_manager.num_level)
+   else if (level != adev->vm_manager.last_level)
/* Everything in between */
return 512;
else
@@ -343,7 +355,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device 
*adev,
 
if (vm->pte_support_ats) {
init_value = AMDGPU_PTE_DEFAULT_ATC;
-   if (level != adev->vm_manager.num_level)
+   if (level != adev->vm_manager.last_level)
init_value |= AMDGPU_PDE_PTE;
 
}
@@ -385,7 +397,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device 
*adev,
spin_unlock(&vm->status_lock);
}
 
-   if (level < adev->vm_manager.num_level) {
+   if (level < adev->vm_manager.last_level) {
uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
((1 << shift) - 1);
@@ -431,7 +443,8 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
-   return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
+   return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
+ adev->vm_manager.root_level);
 }
 
 /**
@@ -1091,6 +1104,7 @@ static void amdgpu_vm_update_pde(struct 
amdgpu_pte_update_params *params,
for (level = 0, pbo = parent->base.bo->parent; pbo; ++level)
pbo = pbo->parent;
 
+   level += params->adev->vm_manager.root_level;
pt = amdgpu_bo_gpu_offset(bo);
flags = AMDGPU_PTE_VALID;
amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
@@ -1247,7 +1261,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device 
*adev,
return 0;
 
 error:
-   amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
+   amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+  adev->vm_manager.root_level);
amdgpu_job_free(job);
return r;
 }
@@ -1266,7 +1281,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params 
*p, uint64_t addr,
 struct amdgpu_vm_pt **entry,
 struct amdgpu_vm_pt **parent)
 {
-   unsigned level = 0;
+   unsigned level = p->adev->vm_manager.root_level;
 
*parent = NULL;
*entry = &p->vm->root;
@@ -1278,7 +1293,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params 
*p, uint64_t addr,
addr &= (1ULL << shift) - 1;
}
 
-   if (level != p->adev->vm_manager.num_level)
+   if (level != p->adev->vm_manager.last_level)
*entry = NULL;
 }
 
@@ -1320,7 +1335,7 @@ static void amdgpu_vm_handle_huge_pages(struct 
amdgpu_pte_update_params *p,
return;
entry->huge = !!(flags & AMDGPU_PDE_PTE);
 
-   amdgpu_ga

[PATCH] drm/amdgpu: Fix a bug that vm size is wrong on Raven

2017-12-13 Thread Yong Zhao
Change-Id: Id522c1cbadb8c069720f4e64a31cff42cd014733
Signed-off-by: Yong Zhao 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 709587d..3b9eb1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2534,7 +2534,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, 
uint32_t vm_size,
uint64_t tmp;
 
/* adjust vm size first */
-   if (amdgpu_vm_size != -1) {
+   if (amdgpu_vm_size != -1 && max_level == 1) {
unsigned max_size = 1 << (max_bits - 30);
 
vm_size = amdgpu_vm_size;
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/ttm: init locked again to prevent incorrect unlock

2017-12-13 Thread Roger He
Change-Id: Icc8b5112570429f24e90d52484df2728c546f85b
Signed-off-by: Roger He 
Reviewed-by: Christian König 
Cc: sta...@vger.kernel.org
---
 drivers/gpu/drm/ttm/ttm_bo.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a3f908c..098b22e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -735,6 +735,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
  place)) {
if (locked)
reservation_object_unlock(bo->resv);
+   locked = false;
continue;
}
break;
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 02/30] drm/amd/display: Remove dead enable_plane function definition and call

2017-12-13 Thread Tom St Denis
Would this fix the regression I found on Carrizo after the drm-next rebase?

Tom



On December 13, 2017 5:34:34 PM EST, Harry Wentland  
wrote:
>Signed-off-by: Harry Wentland 
>Reviewed-by: Jordan Lazare 
>Reviewed-by: Tony Cheng 
>Acked-by: Harry Wentland 
>---
> drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 ---
> drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h   | 4 
> 2 files changed, 7 deletions(-)
>
>diff --git
>a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
>b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
>index 80d36610c302..f0002d63eb63 100644
>--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
>+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
>@@ -2053,9 +2053,6 @@ enum dc_status dce110_apply_ctx_to_hw(
>   context,
>   dc);
> 
>-  if (dc->hwss.enable_plane)
>-  dc->hwss.enable_plane(dc, pipe_ctx, context);
>-
>   if (DC_OK != status)
>   return status;
>   }
>diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
>b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
>index b6215ba514d8..5d2b05b93e76 100644
>--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
>+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
>@@ -138,10 +138,6 @@ struct hw_sequencer_funcs {
> 
>   void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
> 
>-  void (*enable_plane)(struct dc *dc,
>-  struct pipe_ctx *pipe,
>-  struct dc_state *context);
>-
>   void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
> 
>   void (*enable_stream)(struct pipe_ctx *pipe_ctx);
>-- 
>2.14.1
>
>___
>amd-gfx mailing list
>amd-gfx@lists.freedesktop.org
>https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: FW: [PATCH v2 2/2] drm/amdgpu: Move to gtt before cpu accesses dma buf.

2017-12-13 Thread Li, Samuel
Will do after some basic testing.

Sam

From: Deucher, Alexander
Sent: Wednesday, December 13, 2017 2:49 PM
To: Li, Samuel ; Koenig, Christian 
; amd-gfx@lists.freedesktop.org
Subject: Re: FW: [PATCH v2 2/2] drm/amdgpu: Move to gtt before cpu accesses dma 
buf.


Please send the drm prime patch to dri-devel if you didn't already.



Alex


From: amd-gfx 
mailto:amd-gfx-boun...@lists.freedesktop.org>>
 on behalf of Samuel Li mailto:samuel...@amd.com>>
Sent: Wednesday, December 13, 2017 2:17:49 PM
To: Koenig, Christian; 
amd-gfx@lists.freedesktop.org
Subject: Re: FW: [PATCH v2 2/2] drm/amdgpu: Move to gtt before cpu accesses dma 
buf.

For the record.


On 2017-12-13 01:26 PM, Christian König wrote:
> Actually we try to avoid having drivers define their own dma_buf_ops in DRM.
>
> That's why you have all those callbacks in drm_driver which just mirror the 
> dma_buf interface but unpack the GEM object from the dma-buf object.
>
> There are quite a number of exceptions, but those drivers then implement 
> everything on their own because the DRM marshaling doesn't make sense for 
> them.
>
> Christian.
>
> Am 13.12.2017 um 19:01 schrieb Samuel Li:
>> That is an approach. The cost is to add a new callback, which is not 
>> necessary though, since drivers can always actually define their own 
>> dma_buf_ops.
>> The intention here is to allow a driver to reuse drm_gem_prime_dmabuf_ops{}. If 
>> you would like to go this far, maybe a more straightforward way is to 
>> export those ops, e.g. drm_gem_map_attach, so that a driver can use them in 
>> its own definitions.
>>
>> Sam
>>
>>
>>
>> On 2017-12-13 05:23 AM, Christian König wrote:
>>> Something like the attached patch. Not even compile tested.
>>>
>>> Christian.
>>>
>>> Am 12.12.2017 um 20:13 schrieb Samuel Li:
 Not sure if I understand your comments correctly. Currently amdgpu prime 
 reuses drm_gem_prime_dmabuf_ops{}, and it is defined as static which is 
 reasonable. I do not see an easier way to introduce 
 amdgpu_gem_begin_cpu_access().

 Sam

 On 2017-12-12 01:30 PM, Christian König wrote:
>> +while (amdgpu_dmabuf_ops.begin_cpu_access != 
>> amdgpu_gem_begin_cpu_access)
> I would rather just add the four liner code to drm to forward the 
> begin_cpu_access callback into a drm_driver callback instead of all this.
>
> But apart from that it looks good to me.
>
> Christian.
>
> Am 12.12.2017 um 19:14 schrieb Li, Samuel:
>> A gentle ping on this one, Christian, can you take a look at this?
>>
>> Sam
>>
>> -Original Message-
>> From: Li, Samuel
>> Sent: Friday, December 08, 2017 5:22 PM
>> To: amd-gfx@lists.freedesktop.org
>> Cc: Li, Samuel mailto:samuel...@amd.com>>
>> Subject: [PATCH v2 2/2] drm/amdgpu: Move to gtt before cpu accesses dma 
>> buf.
>>
>> To improve cpu read performance. This is implemented for APUs currently.
>>
>> v2: Adapt to change 
>> https://lists.freedesktop.org/archives/amd-gfx/2017-October/015174.html
>>
>> Change-Id: I7a583e23a9ee706e0edd2a46f4e4186a609368e3
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu.h   |  2 ++
>> drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   |  2 +-
>> drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 58 
>> +++
>> 3 files changed, 61 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index f8657c3..193db70 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -417,6 +417,8 @@ amdgpu_gem_prime_import_sg_table(struct drm_device 
>> *dev,  struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
>> struct drm_gem_object *gobj,
>> int flags);
>> +struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
>> +struct dma_buf *dma_buf);
>> int amdgpu_gem_prime_pin(struct drm_gem_object *obj);  void 
>> amdgpu_gem_prime_unpin(struct drm_gem_object *obj);  struct 
>> reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); 
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> index 31383e0..df30b08 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> @@ -868,7 +868,7 @@ static struct drm_driver kms_driver = {
>> .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
>> .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
>> .gem_prime_export = amdgpu_gem_prime_export,
>> -.gem_prime_import = drm_gem_prime_import,
>> +.gem_prime_import = amdgpu

[PATCH 27/30] drm/amd/display: dal 3.1.27

2017-12-13 Thread Harry Wentland
From: Andrew Jiang 

Signed-off-by: Andrew Jiang 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 1b1c7300dfc3..e2e3c9df79ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.26"
+#define DC_VER "3.1.27"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 02/30] drm/amd/display: Remove dead enable_plane function definition and call

2017-12-13 Thread Tom St Denis
Would this fix the regression I found on Carrizo after the drm-next rebase?

Tom



On December 13, 2017 5:34:34 PM EST, Harry Wentland  
wrote:
>Signed-off-by: Harry Wentland 
>Reviewed-by: Jordan Lazare 
>Reviewed-by: Tony Cheng 
>Acked-by: Harry Wentland 
>---
> drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 ---
> drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h   | 4 
> 2 files changed, 7 deletions(-)
>
>diff --git
>a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
>b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
>index 80d36610c302..f0002d63eb63 100644
>--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
>+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
>@@ -2053,9 +2053,6 @@ enum dc_status dce110_apply_ctx_to_hw(
>   context,
>   dc);
> 
>-  if (dc->hwss.enable_plane)
>-  dc->hwss.enable_plane(dc, pipe_ctx, context);
>-
>   if (DC_OK != status)
>   return status;
>   }
>diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
>b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
>index b6215ba514d8..5d2b05b93e76 100644
>--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
>+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
>@@ -138,10 +138,6 @@ struct hw_sequencer_funcs {
> 
>   void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
> 
>-  void (*enable_plane)(struct dc *dc,
>-  struct pipe_ctx *pipe,
>-  struct dc_state *context);
>-
>   void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
> 
>   void (*enable_stream)(struct pipe_ctx *pipe_ctx);
>-- 
>2.14.1
>
>___
>amd-gfx mailing list
>amd-gfx@lists.freedesktop.org
>https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 16/30] drm/amd/display: Use the maximum link setting which EDP reported.

2017-12-13 Thread Harry Wentland
From: Hugo Hu 

Signed-off-by: Hugo Hu 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 00528b214a9f..61e8c3e02d16 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1470,6 +1470,12 @@ void decide_link_settings(struct dc_stream_state *stream,
return;
}
 
+   /* EDP use the link cap setting */
+   if (stream->sink->sink_signal == SIGNAL_TYPE_EDP) {
+   *link_setting = link->verified_link_cap;
+   return;
+   }
+
/* search for the minimum link setting that:
 * 1. is supported according to the link training result
 * 2. could support the b/w requested by the timing
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 29/30] drm/amd/display: Update FMT and OPPBUF functions

2017-12-13 Thread Harry Wentland
From: Eric Bernstein 

Updates to FMT and OPPBUF programming from HW team
pseudocode review.

Signed-off-by: Eric Bernstein 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  | 12 +---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c   | 72 --
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h   | 43 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c  | 16 -
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h  |  8 ---
 drivers/gpu/drm/amd/display/dc/inc/hw/opp.h| 23 +--
 6 files changed, 120 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 2ca364f30e1d..82572863acab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -462,9 +462,6 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space;
struct tg_color black_color = {0};
-   bool enableStereo= stream->timing.timing_3d_format == 
TIMING_3D_FORMAT_NONE ?
-   false:true;
-   bool rightEyePolarity = stream->timing.flags.RIGHT_EYE_3D_POLARITY;
 
/* by upper caller loop, pipe0 is parent pipe and be called first.
 * back end is set up by for pipe0. Other children pipe share back end
@@ -499,11 +496,6 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
&stream->timing,
true);
 
-   pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
-   pipe_ctx->stream_res.opp,
-   enableStereo,
-   rightEyePolarity);
-
 #if 0 /* move to after enable_crtc */
/* TODO: OPP FMT, ABM. etc. should be done here. */
/* or FPGA now. instance 0 only. TODO: move to opp.c */
@@ -2251,10 +2243,10 @@ static void dcn10_setup_stereo(struct pipe_ctx 
*pipe_ctx, struct dc *dc)
 
dcn10_config_stereo_parameters(stream, &flags);
 
-   pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
+   pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
pipe_ctx->stream_res.opp,
flags.PROGRAM_STEREO == 1 ? true:false,
-   stream->timing.flags.RIGHT_EYE_3D_POLARITY == 1 ? true:false);
+   &stream->timing);
 
pipe_ctx->stream_res.tg->funcs->program_stereo(
pipe_ctx->stream_res.tg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 5f078868676c..f6ba0eef4489 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -296,13 +296,75 @@ void opp1_program_fmt(
return;
 }
 
-void opp1_set_stereo_polarity(
-   struct output_pixel_processor *opp,
-   bool enable, bool rightEyePolarity)
+void opp1_program_stereo(
+   struct output_pixel_processor *opp,
+   bool enable,
+   const struct dc_crtc_timing *timing)
 {
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
 
-   REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, enable);
+   uint32_t active_width = timing->h_addressable - timing->h_border_right 
- timing->h_border_right;
+   uint32_t space1_size = timing->v_total - timing->v_addressable;
+   /* TODO: confirm computation of space2_size */
+   uint32_t space2_size = timing->v_total - timing->v_addressable;
+
+   if (!enable) {
+   active_width = 0;
+   space1_size = 0;
+   space2_size = 0;
+   }
+
+   /* TODO: for which cases should FMT_STEREOSYNC_OVERRIDE be set? */
+   REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, 0);
+
+   REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, active_width);
+
+   /* Program OPPBUF_3D_VACT_SPACE1_SIZE and OPPBUF_VACT_SPACE2_SIZE 
registers
+* In 3D progressive frames, Vactive space happens only in between the 
2 frames,
+* so only need to program OPPBUF_3D_VACT_SPACE1_SIZE
+* In 3D alternative frames, left and right frames, top and bottom 
field.
+*/
+   if (timing->timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE)
+   REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, 
space2_size);
+   else
+   REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, 
space1_size);
+
+   /* TODO: Is programming of OPPBUF_DUMMY_DATA_R/G/B needed? */
+   /*
+   REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
+   OPPBUF_DUMMY_DATA_R, data_r);
+   REG_UPDATE(OPPBUF_3D_PARAMETERS_1,
+   OPPBUF_DUMMY_DATA_G, data_g);
+   REG_UPDATE(OPPBUF_3D_PARAMETERS_1,
+   OPPBUF_DUMMY_DATA_B, _data_b);
+ 

[PATCH 30/30] drm/amd/display: Expose dpp1_set_cursor_attributes

2017-12-13 Thread Harry Wentland
From: Yue Hin Lau 

Signed-off-by: Yue Hin Lau 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 640edfa05c94..f56ee4d08d89 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1283,6 +1283,10 @@ enum dcn10_input_csc_select {
INPUT_CSC_SELECT_COMA
 };
 
+void dpp1_set_cursor_attributes(
+   struct dpp *dpp_base,
+   enum dc_cursor_color_format color_format);
+
 bool dpp1_dscl_is_lb_conf_valid(
int ceil_vratio,
int num_partitions,
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 23/30] drm/amd/display: hubp refactor

2017-12-13 Thread Harry Wentland
From: Yue Hin Lau 

Signed-off-by: Yue Hin Lau 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c |  18 +-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 216 +++---
 2 files changed, 120 insertions(+), 114 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 943b7ac17ed9..585b33384002 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -107,10 +107,12 @@ static void hubp1_vready_workaround(struct hubp *hubp,
 }
 
 void hubp1_program_tiling(
-   struct dcn10_hubp *hubp1,
+   struct hubp *hubp,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
 {
+   struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
REG_UPDATE_6(DCSURF_ADDR_CONFIG,
NUM_PIPES, log_2(info->gfx9.num_pipes),
NUM_BANKS, log_2(info->gfx9.num_banks),
@@ -127,13 +129,14 @@ void hubp1_program_tiling(
 }
 
 void hubp1_program_size_and_rotation(
-   struct dcn10_hubp *hubp1,
+   struct hubp *hubp,
enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror)
 {
+   struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
 
/* Program data and meta surface pitch (calculation from addrlib)
@@ -189,9 +192,10 @@ void hubp1_program_size_and_rotation(
 }
 
 void hubp1_program_pixel_format(
-   struct dcn10_hubp *hubp1,
+   struct hubp *hubp,
enum surface_pixel_format format)
 {
+   struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t red_bar = 3;
uint32_t blue_bar = 2;
 
@@ -435,13 +439,11 @@ void hubp1_program_surface_config(
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror)
 {
-   struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
-
hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
-   hubp1_program_tiling(hubp1, tiling_info, format);
+   hubp1_program_tiling(hubp, tiling_info, format);
hubp1_program_size_and_rotation(
-   hubp1, rotation, format, plane_size, dcc, 
horizontal_mirror);
-   hubp1_program_pixel_format(hubp1, format);
+   hubp, rotation, format, plane_size, dcc, 
horizontal_mirror);
+   hubp1_program_pixel_format(hubp, format);
 }
 
 void hubp1_program_requestor(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 58a792f522f3..26f638d36a20 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -127,112 +127,114 @@
SRI(CURSOR_HOT_SPOT, CURSOR, id), \
SRI(CURSOR_DST_OFFSET, CURSOR, id)
 
+#define HUBP_COMMON_REG_VARIABLE_LIST \
+   uint32_t DCHUBP_CNTL; \
+   uint32_t HUBPREQ_DEBUG_DB; \
+   uint32_t DCSURF_ADDR_CONFIG; \
+   uint32_t DCSURF_TILING_CONFIG; \
+   uint32_t DCSURF_SURFACE_PITCH; \
+   uint32_t DCSURF_SURFACE_PITCH_C; \
+   uint32_t DCSURF_SURFACE_CONFIG; \
+   uint32_t DCSURF_FLIP_CONTROL; \
+   uint32_t DCSURF_PRI_VIEWPORT_DIMENSION; \
+   uint32_t DCSURF_PRI_VIEWPORT_START; \
+   uint32_t DCSURF_SEC_VIEWPORT_DIMENSION; \
+   uint32_t DCSURF_SEC_VIEWPORT_START; \
+   uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \
+   uint32_t DCSURF_PRI_VIEWPORT_START_C; \
+   uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \
+   uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \
+   uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \
+   uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS; \
+   uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH; \
+   uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS; \
+   uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH; \
+   uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \
+   uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \
+   uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \
+   uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \
+   uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \
+   uint32_t DCSURF_SURFACE_INUSE; \
+   uint32_t DCSURF_SURFACE_INUSE_HIGH; \
+   uint32_t DCSURF_SURFACE_INUSE_C; \
+   uint32_t DCSURF_SURFACE_INUSE_HIGH_C; \
+   uint32_t DCSURF_SURFACE_EARLIEST_INUSE; \
+   uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH; \
+   uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C; \
+   uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C; \
+   uint32_t DCSURF_SURFACE_CONTROL; \
+   uint32_t HUBPRET_CONTROL; \
+   uint32_t DCN_EXPANSION_MODE; \
+   uint32_t DCHUBP_REQ_SIZE_CONFIG; \
+   uint32_t DCHUBP_REQ_SIZE_CONFIG_C; \
+   u

[PATCH 19/30] drm/amd/display: fix 180 full screen pipe split

2017-12-13 Thread Harry Wentland
From: Dmytro Laktyushkin 

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Yongqiang Sun 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 14 --
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index bc1b5f42a0a4..95b8dd0e53c6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -669,12 +669,6 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx 
*pipe_ctx, struct view *r
if (pipe_ctx->plane_state->horizontal_mirror)
flip_horz_scan_dir = !flip_horz_scan_dir;
 
-   /* Temp W/A for rotated displays, ignore recout_skip */
-   if (flip_vert_scan_dir)
-   recout_skip->height = 0;
-   if (flip_horz_scan_dir)
-   recout_skip->width = 0;
-
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
rect_swap_helper(&src);
@@ -738,7 +732,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx 
*pipe_ctx, struct view *r
}
 
/* Adjust for non-0 viewport offset */
-   if (data->viewport.x) {
+   if (data->viewport.x && !flip_horz_scan_dir) {
int int_part;
 
data->inits.h = dal_fixed31_32_add(data->inits.h, 
dal_fixed31_32_mul_int(
@@ -759,7 +753,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx 
*pipe_ctx, struct view *r
data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
}
 
-   if (data->viewport_c.x) {
+   if (data->viewport_c.x && !flip_horz_scan_dir) {
int int_part;
 
data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, 
dal_fixed31_32_mul_int(
@@ -780,7 +774,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx 
*pipe_ctx, struct view *r
data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, 
int_part);
}
 
-   if (data->viewport.y) {
+   if (data->viewport.y && !flip_vert_scan_dir) {
int int_part;
 
data->inits.v = dal_fixed31_32_add(data->inits.v, 
dal_fixed31_32_mul_int(
@@ -801,7 +795,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx 
*pipe_ctx, struct view *r
data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
}
 
-   if (data->viewport_c.y) {
+   if (data->viewport_c.y && !flip_vert_scan_dir) {
int int_part;
 
data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, 
dal_fixed31_32_mul_int(
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 05/30] drm/amd/display: Define BLNDGAM_CONFIG_STATUS

2017-12-13 Thread Harry Wentland
From: Vitaly Prosyak 

Signed-off-by: Vitaly Prosyak 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index b6d526067cb5..a093ae5fc2de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -730,6 +730,7 @@
type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
type CM_BLNDGAM_LUT_WRITE_EN_MASK; \
type CM_BLNDGAM_LUT_WRITE_SEL; \
+   type CM_BLNDGAM_CONFIG_STATUS; \
type CM_BLNDGAM_LUT_INDEX; \
type BLNDGAM_MEM_PWR_FORCE; \
type CM_3DLUT_MODE; \
@@ -905,6 +906,7 @@
type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \
type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \
type CM_SHAPER_LUT_WRITE_EN_MASK; \
+   type CM_SHAPER_CONFIG_STATUS; \
type CM_SHAPER_LUT_WRITE_SEL; \
type CM_SHAPER_LUT_INDEX; \
type CM_SHAPER_LUT_DATA; \
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 15/30] drm/amd/display: Add hdr_supported flag

2017-12-13 Thread Harry Wentland
From: Yongqiang Sun 

Signed-off-by: Yongqiang Sun 
Reviewed-by: Anthony Koo 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dc_types.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h 
b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9291a60126ad..9faddfae241d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -218,6 +218,7 @@ struct dc_edid_caps {
bool lte_340mcsc_scramble;
 
bool edid_hdmi;
+   bool hdr_supported;
 };
 
 struct view {
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 01/30] drm/amd/display: Print type if we get wrong ObjectID from bios

2017-12-13 Thread Harry Wentland
We've seen a bunch of issues where we can't get the connector from vbios
for what we think should be a valid connector id. Print some more info
when this happens.

Signed-off-by: Harry Wentland 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 14 +++---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  5 +++--
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 21fb78e8048d..c00e405b63e8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -190,6 +190,7 @@ static struct graphics_object_id 
bios_parser_get_connector_id(
struct bios_parser *bp = BP_FROM_DCB(dcb);
struct graphics_object_id object_id = dal_graphics_object_id_init(
0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+   uint16_t id;
 
uint32_t connector_table_offset = bp->object_info_tbl_offset
+ 
le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
@@ -197,12 +198,19 @@ static struct graphics_object_id 
bios_parser_get_connector_id(
ATOM_OBJECT_TABLE *tbl =
GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
 
-   if (tbl && tbl->ucNumberOfObjects > i) {
-   const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+   if (!tbl) {
+   dm_error("Can't get connector table from atom bios.\n");
+   return object_id;
+   }
 
-   object_id = object_id_from_bios_object_id(id);
+   if (tbl->ucNumberOfObjects <= i) {
+   dm_error("Can't find connector id %d in connector table of size 
%d.\n",
+i, tbl->ucNumberOfObjects);
+   return object_id;
}
 
+   id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+   object_id = object_id_from_bios_object_id(id);
return object_id;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 00130152f366..da83412af306 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -938,8 +938,9 @@ static bool construct(
link->link_id = bios->funcs->get_connector_id(bios, 
init_params->connector_index);
 
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
-   dm_error("%s: Invalid Connector ObjectID from Adapter Service 
for connector index:%d!\n",
-   __func__, init_params->connector_index);
+   dm_error("%s: Invalid Connector ObjectID from Adapter Service 
for connector index:%d! type %d expected %d\n",
+__func__, init_params->connector_index,
+link->link_id.type, OBJECT_TYPE_CONNECTOR);
goto create_fail;
}
 
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 07/30] drm/amd/display: Declare and share color space types for dcn's

2017-12-13 Thread Harry Wentland
From: Vitaly Prosyak 

Signed-off-by: Vitaly Prosyak 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c  | 145 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h   |   2 +-
 .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c|  65 +++--
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  |  15 +--
 drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h|   2 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h  |  21 +--
 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h  |   4 +
 7 files changed, 168 insertions(+), 86 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 71993d5983bf..ebc96b720083 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -28,6 +28,8 @@
 #include "timing_generator.h"
 #include "hw_sequencer.h"
 
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
 /* used as index in array of black_color_format */
 enum black_color_format {
BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0,
@@ -38,6 +40,15 @@ enum black_color_format {
BLACK_COLOR_FORMAT_DEBUG,
 };
 
+enum dc_color_space_type {
+   COLOR_SPACE_RGB_TYPE,
+   COLOR_SPACE_RGB_LIMITED_TYPE,
+   COLOR_SPACE_YCBCR601_TYPE,
+   COLOR_SPACE_YCBCR709_TYPE,
+   COLOR_SPACE_YCBCR601_LIMITED_TYPE,
+   COLOR_SPACE_YCBCR709_LIMITED_TYPE
+};
+
 static const struct tg_color black_color_format[] = {
/* BlackColorFormat_RGB_FullRange */
{0, 0, 0},
@@ -53,6 +64,140 @@ static const struct tg_color black_color_format[] = {
{0xff, 0xff, 0},
 };
 
+struct out_csc_color_matrix_type {
+   enum dc_color_space_type color_space_type;
+   uint16_t regval[12];
+};
+
+static const struct out_csc_color_matrix_type output_csc_matrix[] = {
+   { COLOR_SPACE_RGB_TYPE,
+   { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+   { COLOR_SPACE_RGB_LIMITED_TYPE,
+   { 0x1B67, 0, 0, 0x201, 0, 0x1B67, 0, 0x201, 0, 0, 0x1B67, 
0x201} },
+   { COLOR_SPACE_YCBCR601_TYPE,
+   { 0xE04, 0xF444, 0xFDB9, 0x1004, 0x831, 0x1016, 0x320, 0x201, 
0xFB45,
+   0xF6B7, 0xE04, 0x1004} },
+   { COLOR_SPACE_YCBCR709_TYPE,
+   { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
+   0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
+
+   /* TODO: correct values below */
+   { COLOR_SPACE_YCBCR601_LIMITED_TYPE,
+   { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
+   0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 
0x1000} },
+   { COLOR_SPACE_YCBCR709_LIMITED_TYPE,
+   { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+   0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+};
+
+static bool is_rgb_type(
+   enum dc_color_space color_space)
+{
+   bool ret = false;
+
+   if (color_space == COLOR_SPACE_SRGB ||
+   color_space == COLOR_SPACE_XR_RGB   ||
+   color_space == COLOR_SPACE_MSREF_SCRGB  ||
+   color_space == COLOR_SPACE_2020_RGB_FULLRANGE   ||
+   color_space == COLOR_SPACE_ADOBERGB ||
+   color_space == COLOR_SPACE_DCIP3||
+   color_space == COLOR_SPACE_DOLBYVISION)
+   ret = true;
+   return ret;
+}
+
+static bool is_rgb_limited_type(
+   enum dc_color_space color_space)
+{
+   bool ret = false;
+
+   if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+   color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
+   ret = true;
+   return ret;
+}
+
+static bool is_ycbcr601_type(
+   enum dc_color_space color_space)
+{
+   bool ret = false;
+
+   if (color_space == COLOR_SPACE_YCBCR601 ||
+   color_space == COLOR_SPACE_XV_YCC_601)
+   ret = true;
+   return ret;
+}
+
+static bool is_ycbcr601_limited_type(
+   enum dc_color_space color_space)
+{
+   bool ret = false;
+
+   if (color_space == COLOR_SPACE_YCBCR601_LIMITED)
+   ret = true;
+   return ret;
+}
+
+static bool is_ycbcr709_type(
+   enum dc_color_space color_space)
+{
+   bool ret = false;
+
+   if (color_space == COLOR_SPACE_YCBCR709 ||
+   color_space == COLOR_SPACE_XV_YCC_709)
+   ret = true;
+   return ret;
+}
+
+static bool is_ycbcr709_limited_type(
+   enum dc_color_space color_space)
+{
+   bool ret = false;
+
+   if (color_space == COLOR_SPACE_YCBCR709_LIMITED)
+   ret = true;
+   return ret;
+}
+enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
+{
+   enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
+
+   

[PATCH 17/30] drm/amd/display: Remove dwbc from pipe_ctx

2017-12-13 Thread Harry Wentland
From: Eric Bernstein 

Signed-off-by: Eric Bernstein 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/inc/core_types.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h 
b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 9cc6bbb20714..d6971054ec07 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -212,7 +212,6 @@ struct pipe_ctx {
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
 #endif
-   struct dwbc *dwbc;
 };
 
 struct resource_context {
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 26/30] drm/amd/display: Fix unused variable warnings.

2017-12-13 Thread Harry Wentland
From: "Leo (Sunpeng) Li" 

... since the Linux kernel build treats warnings as errors.

Signed-off-by: Leo (Sunpeng) Li 
Reviewed-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 7 ---
 1 file changed, 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index c9d717cc7e47..2ca364f30e1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -465,8 +465,6 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
bool enableStereo= stream->timing.timing_3d_format == 
TIMING_3D_FORMAT_NONE ?
false:true;
bool rightEyePolarity = stream->timing.flags.RIGHT_EYE_3D_POLARITY;
-   int width = stream->timing.h_addressable + stream->timing.h_border_left 
+ stream->timing.h_border_right;
-   int height = stream->timing.v_addressable + 
stream->timing.v_border_bottom + stream->timing.v_border_top;
 
/* by upper caller loop, pipe0 is parent pipe and be called first.
 * back end is set up by for pipe0. Other children pipe share back end
@@ -1813,9 +1811,6 @@ static void program_all_pipe_in_tree(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
 {
-   struct dc_stream_state *stream = pipe_ctx->stream;
-   int width = stream->timing.h_addressable + stream->timing.h_border_left 
+ stream->timing.h_border_right;
-   int height = stream->timing.v_addressable + 
stream->timing.v_border_bottom + stream->timing.v_border_top;
 
if (pipe_ctx->top_pipe == NULL) {
 
@@ -1942,8 +1937,6 @@ static void dcn10_apply_ctx_for_surface(
bool removed_pipe[4] = { false };
unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
bool program_water_mark = false;
-   int width = stream->timing.h_addressable + stream->timing.h_border_left 
+ stream->timing.h_border_right;
-   int height = stream->timing.v_addressable + 
stream->timing.v_border_bottom + stream->timing.v_border_top;
 
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 08/30] drm/amd/display: Fix check for whether dmcu fw is running

2017-12-13 Thread Harry Wentland
From: Anthony Koo 

Signed-off-by: Anthony Koo 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c   | 11 ++--
 drivers/gpu/drm/amd/display/dc/dce/dce_abm.c| 18 +++--
 drivers/gpu/drm/amd/display/dc/dce/dce_abm.h|  8 ++
 drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c |  4 +--
 drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c   | 34 +++--
 drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h   |  3 +++
 drivers/gpu/drm/amd/display/dc/inc/hw/abm.h |  4 +--
 drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h|  1 +
 8 files changed, 54 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index da83412af306..a37428271573 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1930,12 +1930,18 @@ bool dc_link_set_backlight_level(const struct dc_link 
*link, uint32_t level,
 {
struct dc  *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
+   struct dmcu *dmcu = core_dc->res_pool->dmcu;
unsigned int controller_id = 0;
+   bool use_smooth_brightness = true;
int i;
 
-   if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+   if ((dmcu == NULL) ||
+   (abm == NULL) ||
+   (abm->funcs->set_backlight_level == NULL))
return false;
 
+   use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
dm_logger_write(link->ctx->logger, LOG_BACKLIGHT,
"New Backlight level: %d (0x%X)\n", level, level);
 
@@ -1958,7 +1964,8 @@ bool dc_link_set_backlight_level(const struct dc_link 
*link, uint32_t level,
abm,
level,
frame_ramp,
-   controller_id);
+   controller_id,
+   use_smooth_brightness);
}
 
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 3fe8e697483f..b48190f54907 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -385,21 +385,12 @@ static bool dce_abm_init_backlight(struct abm *abm)
return true;
 }
 
-static bool is_dmcu_initialized(struct abm *abm)
-{
-   struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-   unsigned int dmcu_uc_reset;
-
-   REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
-
-   return !dmcu_uc_reset;
-}
-
 static bool dce_abm_set_backlight_level(
struct abm *abm,
unsigned int backlight_level,
unsigned int frame_ramp,
-   unsigned int controller_id)
+   unsigned int controller_id,
+   bool use_smooth_brightness)
 {
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 
@@ -408,7 +399,7 @@ static bool dce_abm_set_backlight_level(
backlight_level, backlight_level);
 
/* If DMCU is in reset state, DMCU is uninitialized */
-   if (is_dmcu_initialized(abm))
+   if (use_smooth_brightness)
dmcu_set_backlight_level(abm_dce,
backlight_level,
frame_ramp,
@@ -425,8 +416,7 @@ static const struct abm_funcs dce_funcs = {
.init_backlight = dce_abm_init_backlight,
.set_backlight_level = dce_abm_set_backlight_level,
.get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
-   .set_abm_immediate_disable = dce_abm_immediate_disable,
-   .is_dmcu_initialized = is_dmcu_initialized
+   .set_abm_immediate_disable = dce_abm_immediate_disable
 };
 
 static void dce_abm_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h 
b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 59e909ec88f2..ff9436966041 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -37,8 +37,7 @@
SR(LVTMA_PWRSEQ_REF_DIV), \
SR(MASTER_COMM_CNTL_REG), \
SR(MASTER_COMM_CMD_REG), \
-   SR(MASTER_COMM_DATA_REG1), \
-   SR(DMCU_STATUS)
+   SR(MASTER_COMM_DATA_REG1)
 
 #define ABM_DCE110_COMMON_REG_LIST() \
ABM_COMMON_REG_LIST_DCE_BASE(), \
@@ -84,8 +83,7 @@
ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
-   ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh), \
-   ABM_SF(DMCU_STATUS, UC_IN_RESET, mask_sh)
+   ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh)
 
 #define ABM_MASK_SH_LIST_DCE110(mask_sh) \
ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), 

[PATCH 06/30] drm/amd/display: Do DC mode-change check after stream creation

2017-12-13 Thread Harry Wentland
From: "Leo (Sunpeng) Li" 

Do DC level mode change checks (via dc_stream_state) only when creating
a new stream, as this check is unnecessary without a new dc_stream_state
anyway. Doing so better demonstrates the intent of this mode-change
check, in comparison to guarding it with the 'enable' flag.

Signed-off-by: Leo (Sunpeng) Li 
Reviewed-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 15 ++-
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index efbc697266ff..5163cf6fb73c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4553,18 +4553,15 @@ static int dm_update_crtcs_state(struct dc *dc,
__func__, acrtc->base.base.id);
break;
}
-   }
-
-   if (enable && dc_is_stream_unchanged(new_stream, 
dm_old_crtc_state->stream) &&
-   dc_is_stream_scaling_unchanged(new_stream, 
dm_old_crtc_state->stream)) {
-
-   new_crtc_state->mode_changed = false;
 
-   DRM_DEBUG_DRIVER("Mode change not required, setting 
mode_changed to %d",
-new_crtc_state->mode_changed);
+   if (dc_is_stream_unchanged(new_stream, 
dm_old_crtc_state->stream) &&
+   dc_is_stream_scaling_unchanged(new_stream, 
dm_old_crtc_state->stream)) {
+   new_crtc_state->mode_changed = false;
+   DRM_DEBUG_DRIVER("Mode change not required, 
setting mode_changed to %d",
+new_crtc_state->mode_changed);
+   }
}
 
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto next_crtc;
 
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 13/30] drm/amd/display: Update HUBP

2017-12-13 Thread Harry Wentland
From: Eric Bernstein 

Signed-off-by: Eric Bernstein 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 31 ---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h |  7 +
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  | 15 +++
 3 files changed, 32 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 2d843b2d5f86..90c57a503302 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -776,20 +776,7 @@ void hubp1_read_state(struct dcn10_hubp *hubp1,
QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
 }
 
-enum cursor_pitch {
-   CURSOR_PITCH_64_PIXELS = 0,
-   CURSOR_PITCH_128_PIXELS,
-   CURSOR_PITCH_256_PIXELS
-};
-
-enum cursor_lines_per_chunk {
-   CURSOR_LINE_PER_CHUNK_2 = 1,
-   CURSOR_LINE_PER_CHUNK_4,
-   CURSOR_LINE_PER_CHUNK_8,
-   CURSOR_LINE_PER_CHUNK_16
-};
-
-static bool ippn10_cursor_program_control(
+bool hubp1_cursor_program_control(
struct dcn10_hubp *hubp1,
bool pixel_data_invert,
enum dc_cursor_color_format color_format)
@@ -810,8 +797,7 @@ static bool ippn10_cursor_program_control(
return true;
 }
 
-static enum cursor_pitch ippn10_get_cursor_pitch(
-   unsigned int pitch)
+enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch)
 {
enum cursor_pitch hw_pitch;
 
@@ -834,7 +820,7 @@ static enum cursor_pitch ippn10_get_cursor_pitch(
return hw_pitch;
 }
 
-static enum cursor_lines_per_chunk ippn10_get_lines_per_chunk(
+static enum cursor_lines_per_chunk hubp1_get_lines_per_chunk(
unsigned int cur_width,
enum dc_cursor_color_format format)
 {
@@ -860,8 +846,8 @@ void hubp1_cursor_set_attributes(
const struct dc_cursor_attributes *attr)
 {
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
-   enum cursor_pitch hw_pitch = ippn10_get_cursor_pitch(attr->pitch);
-   enum cursor_lines_per_chunk lpc = ippn10_get_lines_per_chunk(
+   enum cursor_pitch hw_pitch = hubp1_get_cursor_pitch(attr->pitch);
+   enum cursor_lines_per_chunk lpc = hubp1_get_lines_per_chunk(
attr->width, attr->color_format);
 
hubp->curs_attr = *attr;
@@ -874,11 +860,13 @@ void hubp1_cursor_set_attributes(
REG_UPDATE_2(CURSOR_SIZE,
CURSOR_WIDTH, attr->width,
CURSOR_HEIGHT, attr->height);
+
REG_UPDATE_3(CURSOR_CONTROL,
CURSOR_MODE, attr->color_format,
CURSOR_PITCH, hw_pitch,
CURSOR_LINES_PER_CHUNK, lpc);
-   ippn10_cursor_program_control(hubp1,
+
+   hubp1_cursor_program_control(hubp1,
attr->attribute_flags.bits.INVERT_PIXEL_DATA,
attr->color_format);
 }
@@ -920,7 +908,8 @@ void hubp1_cursor_set_position(
cur_en = 0;  /* not visible beyond left edge*/
 
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
-   hubp1_cursor_set_attributes(hubp, &hubp->curs_attr);
+   hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index a7834dd50716..17a5db0883b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -681,4 +681,11 @@ struct dcn_hubp_state {
 void hubp1_read_state(struct dcn10_hubp *hubp1,
struct dcn_hubp_state *s);
 
+enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
+
+bool hubp1_cursor_program_control(
+   struct dcn10_hubp *hubp1,
+   bool pixel_data_invert,
+   enum dc_cursor_color_format color_format);
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 49b12f602e79..6a4685f972e1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -28,6 +28,21 @@
 
 #include "mem_input.h"
 
+
+enum cursor_pitch {
+   CURSOR_PITCH_64_PIXELS = 0,
+   CURSOR_PITCH_128_PIXELS,
+   CURSOR_PITCH_256_PIXELS
+};
+
+enum cursor_lines_per_chunk {
+   CURSOR_LINE_PER_CHUNK_2 = 1,
+   CURSOR_LINE_PER_CHUNK_4,
+   CURSOR_LINE_PER_CHUNK_8,
+   CURSOR_LINE_PER_CHUNK_16
+};
+
+
 struct hubp {
struct hubp_funcs *funcs;
struct dc_context *ctx;
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 28/30] drm/amd/display: check for null before calling is_blanked

2017-12-13 Thread Harry Wentland
From: Yue Hin Lau 

Signed-off-by: Yue Hin Lau 
Reviewed-by: Eric Bernstein 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index ab875ea8aba4..35e84ed031de 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -580,7 +580,7 @@ static void program_timing_sync(
for (j = 0; j < group_size; j++) {
struct pipe_ctx *temp;
 
-   if 
(!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+   if (pipe_set[j]->stream_res.tg->funcs->is_blanked && 
!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
if (j == 0)
break;
 
@@ -593,7 +593,7 @@ static void program_timing_sync(
 
/* remove any other unblanked pipes as they have already been 
synced */
for (j = j + 1; j < group_size; j++) {
-   if 
(!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+   if (pipe_set[j]->stream_res.tg->funcs->is_blanked && 
!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 25/30] drm/amd/display: Only blank DCN when we have set_blank implementation

2017-12-13 Thread Harry Wentland
From: Yue Hin Lau 

Also rename timing_generator to optc

Signed-off-by: Yue Hin Lau 
Reviewed-by: Eric Bernstein 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dcn10/Makefile  |  2 +-
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  | 33 +-
 .../{dcn10_timing_generator.c => dcn10_optc.c} |  6 ++--
 .../{dcn10_timing_generator.h => dcn10_optc.h} |  0
 .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  |  2 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/opp.h| 19 -
 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h  |  1 +
 7 files changed, 32 insertions(+), 31 deletions(-)
 rename drivers/gpu/drm/amd/display/dc/dcn10/{dcn10_timing_generator.c => 
dcn10_optc.c} (99%)
 rename drivers/gpu/drm/amd/display/dc/dcn10/{dcn10_timing_generator.h => 
dcn10_optc.h} (100%)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile 
b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 9eac228315b5..5469bdfe19f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -23,7 +23,7 @@
 # Makefile for DCN.
 
 DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
-   dcn10_dpp.o dcn10_opp.o dcn10_timing_generator.o \
+   dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
dcn10_hubbub.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 824de3630889..c9d717cc7e47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -32,7 +32,7 @@
 #include "dce/dce_hwseq.h"
 #include "abm.h"
 #include "dmcu.h"
-#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10_optc.h"
 #include "dcn10/dcn10_dpp.h"
 #include "dcn10/dcn10_mpc.h"
 #include "timing_generator.h"
@@ -465,6 +465,8 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
bool enableStereo= stream->timing.timing_3d_format == 
TIMING_3D_FORMAT_NONE ?
false:true;
bool rightEyePolarity = stream->timing.flags.RIGHT_EYE_3D_POLARITY;
+   int width = stream->timing.h_addressable + stream->timing.h_border_left 
+ stream->timing.h_border_right;
+   int height = stream->timing.v_addressable + 
stream->timing.v_border_bottom + stream->timing.v_border_top;
 
/* by upper caller loop, pipe0 is parent pipe and be called first.
 * back end is set up by for pipe0. Other children pipe share back end
@@ -518,11 +520,14 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
/* program otg blank color */
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
-   pipe_ctx->stream_res.tg->funcs->set_blank_color(
-   pipe_ctx->stream_res.tg,
-   &black_color);
 
-   if 
(!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
+   if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
+   pipe_ctx->stream_res.tg->funcs->set_blank_color(
+   pipe_ctx->stream_res.tg,
+   &black_color);
+
+   if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
+   
!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {

pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
false_optc_underflow_wa(dc, pipe_ctx->stream, 
pipe_ctx->stream_res.tg);
@@ -1808,6 +1813,10 @@ static void program_all_pipe_in_tree(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
 {
+   struct dc_stream_state *stream = pipe_ctx->stream;
+   int width = stream->timing.h_addressable + stream->timing.h_border_left 
+ stream->timing.h_border_right;
+   int height = stream->timing.v_addressable + 
stream->timing.v_border_bottom + stream->timing.v_border_top;
+
if (pipe_ctx->top_pipe == NULL) {
 
pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = 
pipe_ctx->pipe_dlg_param.vready_offset;
@@ -1818,7 +1827,11 @@ static void program_all_pipe_in_tree(
 
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg);
-   
pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, 
!is_pipe_tree_visible(pipe_ctx));
+
+   if (pipe_ctx->stream_res.tg->funcs->set_blank)
+   pipe_ctx->stream_res.tg->funcs->set_blank(
+   pipe_ctx->stream_res.tg,
+   !is_pipe_tree_visible(pipe_ctx));
}
 
if (pipe_ctx->plane_state != NULL) {
@@ -1925,9 +19

[PATCH 22/30] drm/amd/display: integrating optc pseudocode

2017-12-13 Thread Harry Wentland
From: Yue Hin Lau 

Signed-off-by: Yue Hin Lau 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 .../drm/amd/display/dc/dcn10/dcn10_timing_generator.h | 19 ++-
 .../gpu/drm/amd/display/dc/inc/hw/timing_generator.h  | 19 +++
 2 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
index a9ce97fd7f09..eec860fa21e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
@@ -132,6 +132,10 @@ struct dcn_optc_registers {
uint32_t OPPBUF_CONTROL;
uint32_t OPPBUF_3D_PARAMETERS_0;
uint32_t CONTROL;
+   uint32_t OTG_GSL_WINDOW_X;
+   uint32_t OTG_GSL_WINDOW_Y;
+   uint32_t OTG_VUPDATE_KEEPOUT;
+   uint32_t OTG_DSC_START_POSITION;
 };
 
 #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -346,7 +350,20 @@ struct dcn_optc_registers {
type OTG_GSL2_EN;\
type OTG_GSL_MASTER_EN;\
type OTG_GSL_FORCE_DELAY;\
-   type OTG_GSL_CHECK_ALL_FIELDS;
+   type OTG_GSL_CHECK_ALL_FIELDS;\
+   type OTG_GSL_WINDOW_START_X;\
+   type OTG_GSL_WINDOW_END_X;\
+   type OTG_GSL_WINDOW_START_Y;\
+   type OTG_GSL_WINDOW_END_Y;\
+   type OTG_RANGE_TIMING_DBUF_UPDATE_MODE;\
+   type OTG_GSL_MASTER_MODE;\
+   type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
+   type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
+   type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
+   type OTG_DSC_START_POSITION_X;\
+   type OTG_DSC_START_POSITION_LINE_NUM;\
+   type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
+
 
 struct dcn_optc_shift {
TG_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 11a1d3672584..ec312f1a3e55 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -42,6 +42,19 @@ struct dcp_gsl_params {
int gsl_master;
 };
 
+struct gsl_params {
+   int gsl0_en;
+   int gsl1_en;
+   int gsl2_en;
+   int gsl_master_en;
+   int gsl_master_mode;
+   int master_update_lock_gsl_en;
+   int gsl_window_start_x;
+   int gsl_window_end_x;
+   int gsl_window_start_y;
+   int gsl_window_end_y;
+};
+
 /* define the structure of Dynamic Refresh Mode */
 struct drr_params {
uint32_t vertical_total_min;
@@ -65,6 +78,12 @@ struct _dlg_otg_param {
enum signal_type signal;
 };
 
+struct vupdate_keepout_params {
+   int start_offset;
+   int end_offset;
+   int enable;
+};
+
 struct crtc_stereo_flags {
uint8_t PROGRAM_STEREO : 1;
uint8_t PROGRAM_POLARITY   : 1;
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 24/30] drm/amd/display: Put dcn_mi_registers with other structs

2017-12-13 Thread Harry Wentland
From: Eric Bernstein 

Signed-off-by: Eric Bernstein 
Reviewed-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 14 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  |  1 -
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 26f638d36a20..33e91d9c010f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -230,12 +230,7 @@
uint32_t CURSOR_CONTROL; \
uint32_t CURSOR_POSITION; \
uint32_t CURSOR_HOT_SPOT; \
-   uint32_t CURSOR_DST_OFFSET;
-
-
-struct dcn_mi_registers {
-   HUBP_COMMON_REG_VARIABLE_LIST
-};
+   uint32_t CURSOR_DST_OFFSET
 
 #define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -399,9 +394,6 @@ struct dcn_mi_registers {
HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
HUBP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
 
-
-
-
 #define DCN_HUBP_REG_FIELD_LIST(type) \
type HUBP_BLANK_EN;\
type HUBP_TTU_DISABLE;\
@@ -581,6 +573,10 @@ struct dcn_mi_registers {
type CURSOR_DST_X_OFFSET; \
type OUTPUT_FP
 
+struct dcn_mi_registers {
+   HUBP_COMMON_REG_VARIABLE_LIST;
+};
+
 struct dcn_mi_shift {
DCN_HUBP_REG_FIELD_LIST(uint8_t);
 };
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 6a4685f972e1..b7c7e70022e4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -42,7 +42,6 @@ enum cursor_lines_per_chunk {
CURSOR_LINE_PER_CHUNK_16
 };
 
-
 struct hubp {
struct hubp_funcs *funcs;
struct dc_context *ctx;
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 02/30] drm/amd/display: Remove dead enable_plane function definition and call

2017-12-13 Thread Harry Wentland
Signed-off-by: Harry Wentland 
Reviewed-by: Jordan Lazare 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 ---
 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h   | 4 
 2 files changed, 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 80d36610c302..f0002d63eb63 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2053,9 +2053,6 @@ enum dc_status dce110_apply_ctx_to_hw(
context,
dc);
 
-   if (dc->hwss.enable_plane)
-   dc->hwss.enable_plane(dc, pipe_ctx, context);
-
if (DC_OK != status)
return status;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index b6215ba514d8..5d2b05b93e76 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -138,10 +138,6 @@ struct hw_sequencer_funcs {
 
void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
 
-   void (*enable_plane)(struct dc *dc,
-   struct pipe_ctx *pipe,
-   struct dc_state *context);
-
void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
 
void (*enable_stream)(struct pipe_ctx *pipe_ctx);
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 09/30] drm/amd/display: Fix rehook MST display not light back on

2017-12-13 Thread Harry Wentland
From: "Jerry (Fangzhi) Zuo" 

The previously applied dm_restore_drm_connector_state() has been removed.
Set the link status to BAD before the hotplug() event so that it can
trigger another modeset from userspace.

The "Fix MST daisy chain SST not light up" commit makes it try to create
a stream prior to dc_sink, which means dc_sink is not yet present in
create_stream_for_sink().

Signed-off-by: Jerry (Fangzhi) Zuo 
Reviewed-by: Roman Li 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  | 13 +++---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h  |  2 +
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c| 51 ++
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.h|  1 +
 4 files changed, 62 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5163cf6fb73c..3f982aa56b01 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2356,7 +2356,7 @@ create_stream_for_sink(struct amdgpu_dm_connector 
*aconnector,
   const struct dm_connector_state *dm_state)
 {
struct drm_display_mode *preferred_mode = NULL;
-   const struct drm_connector *drm_connector;
+   struct drm_connector *drm_connector;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
@@ -2375,11 +2375,13 @@ create_stream_for_sink(struct amdgpu_dm_connector 
*aconnector,
 
if (!aconnector->dc_sink) {
/*
-* Exclude MST from creating fake_sink
-* TODO: need to enable MST into fake_sink feature
+* Create dc_sink when necessary to MST
+* Don't apply fake_sink to MST
 */
-   if (aconnector->mst_port)
-   goto stream_create_fail;
+   if (aconnector->mst_port) {
+   dm_dp_mst_dc_sink_create(drm_connector);
+   goto mst_dc_sink_create_done;
+   }
 
if (create_fake_sink(aconnector))
goto stream_create_fail;
@@ -2430,6 +2432,7 @@ create_stream_for_sink(struct amdgpu_dm_connector 
*aconnector,
 stream_create_fail:
 dm_state_null:
 drm_connector_null:
+mst_dc_sink_create_done:
return stream;
 }
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 450379d684cb..3c9154f2d058 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
 
bool fake_enable;
+
+   bool mst_connected;
 };
 
 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, 
base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 707928b88448..f3d87f418d2e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -180,6 +180,42 @@ static int dm_connector_update_modes(struct drm_connector 
*connector,
return drm_add_edid_modes(connector, edid);
 }
 
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+   struct amdgpu_dm_connector *aconnector = 
to_amdgpu_dm_connector(connector);
+   struct edid *edid;
+   struct dc_sink *dc_sink;
+   struct dc_sink_init_data init_params = {
+   .link = aconnector->dc_link,
+   .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+   edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, 
aconnector->port);
+
+   if (!edid) {
+   drm_mode_connector_update_edid_property(
+   &aconnector->base,
+   NULL);
+   return;
+   }
+
+   aconnector->edid = edid;
+
+   dc_sink = dc_link_add_remote_sink(
+   aconnector->dc_link,
+   (uint8_t *)aconnector->edid,
+   (aconnector->edid->extensions + 1) * EDID_LENGTH,
+   &init_params);
+
+   dc_sink->priv = aconnector;
+   aconnector->dc_sink = dc_sink;
+
+   amdgpu_dm_add_sink_to_freesync_module(
+   connector, aconnector->edid);
+
+   drm_mode_connector_update_edid_property(
+   &aconnector->base, aconnector->edid);
+}
+
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
struct amdgpu_dm_connector *aconnector = 
to_amdgpu_dm_connector(connector);
@@ -306,6 +342,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_set_path_property(connector, 
pathprop);
 
drm_connec

[PATCH 20/30] drm/amd/display: Clean up DCN cursor code

2017-12-13 Thread Harry Wentland
From: Eric Bernstein 

Signed-off-by: Eric Bernstein 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 29 ---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h |  5 
 2 files changed, 5 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 90c57a503302..943b7ac17ed9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -776,27 +776,6 @@ void hubp1_read_state(struct dcn10_hubp *hubp1,
QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
 }
 
-bool hubp1_cursor_program_control(
-   struct dcn10_hubp *hubp1,
-   bool pixel_data_invert,
-   enum dc_cursor_color_format color_format)
-{
-   if (REG(CURSOR_SETTINS))
-   REG_SET_2(CURSOR_SETTINS, 0,
-   /* no shift of the cursor HDL schedule */
-   CURSOR0_DST_Y_OFFSET, 0,
-/* used to shift the cursor chunk request 
deadline */
-   CURSOR0_CHUNK_HDL_ADJUST, 3);
-   else
-   REG_SET_2(CURSOR_SETTINGS, 0,
-   /* no shift of the cursor HDL schedule */
-   CURSOR0_DST_Y_OFFSET, 0,
-/* used to shift the cursor chunk request 
deadline */
-   CURSOR0_CHUNK_HDL_ADJUST, 3);
-
-   return true;
-}
-
 enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch)
 {
enum cursor_pitch hw_pitch;
@@ -866,9 +845,11 @@ void hubp1_cursor_set_attributes(
CURSOR_PITCH, hw_pitch,
CURSOR_LINES_PER_CHUNK, lpc);
 
-   hubp1_cursor_program_control(hubp1,
-   attr->attribute_flags.bits.INVERT_PIXEL_DATA,
-   attr->color_format);
+   REG_SET_2(CURSOR_SETTINS, 0,
+   /* no shift of the cursor HDL schedule */
+   CURSOR0_DST_Y_OFFSET, 0,
+/* used to shift the cursor chunk request deadline */
+   CURSOR0_CHUNK_HDL_ADJUST, 3);
 }
 
 void hubp1_cursor_set_position(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 17a5db0883b9..58a792f522f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -683,9 +683,4 @@ void hubp1_read_state(struct dcn10_hubp *hubp1,
 
 enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
 
-bool hubp1_cursor_program_control(
-   struct dcn10_hubp *hubp1,
-   bool pixel_data_invert,
-   enum dc_cursor_color_format color_format);
-
 #endif
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 21/30] drm/amd/display: Call validate_fbc should_enable_fbc

2017-12-13 Thread Harry Wentland
validate_fbc never fails a modeset. It's simply used to decide whether
to use FBC or not. Calling it validate_fbc might be confusing to some,
so rename it to should_enable_fbc.

With that, let's also remove the DC_STATUS return code and return bool,
and make enable_fbc a void function since we never check its return
value and probably never want to anyway.

Signed-off-by: Harry Wentland 
Reviewed-by: Roman Li 
Acked-by: Harry Wentland 
---
 .../amd/display/dc/dce110/dce110_hw_sequencer.c| 39 +-
 1 file changed, 16 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index f0002d63eb63..86cdd7b4811f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1689,60 +1689,54 @@ static void apply_min_clocks(
 /*
  *  Check if FBC can be enabled
  */
-static enum dc_status validate_fbc(struct dc *dc,
-   struct dc_state *context)
+static bool should_enable_fbc(struct dc *dc,
+ struct dc_state *context)
 {
-   struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[0];
+   struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
 
ASSERT(dc->fbc_compressor);
 
/* FBC memory should be allocated */
if (!dc->ctx->fbc_gpu_addr)
-   return DC_ERROR_UNEXPECTED;
+   return false;
 
/* Only supports single display */
if (context->stream_count != 1)
-   return DC_ERROR_UNEXPECTED;
+   return false;
 
/* Only supports eDP */
if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
-   return DC_ERROR_UNEXPECTED;
+   return false;
 
/* PSR should not be enabled */
if (pipe_ctx->stream->sink->link->psr_enabled)
-   return DC_ERROR_UNEXPECTED;
+   return false;
 
/* Nothing to compress */
if (!pipe_ctx->plane_state)
-   return DC_ERROR_UNEXPECTED;
+   return false;
 
/* Only for non-linear tiling */
if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == 
DC_ARRAY_LINEAR_GENERAL)
-   return DC_ERROR_UNEXPECTED;
+   return false;
 
-   return DC_OK;
+   return true;
 }
 
 /*
  *  Enable FBC
  */
-static enum dc_status enable_fbc(struct dc *dc,
-   struct dc_state *context)
+static void enable_fbc(struct dc *dc,
+  struct dc_state *context)
 {
-   enum dc_status status = validate_fbc(dc, context);
-
-   if (status == DC_OK) {
+   if (should_enable_fbc(dc, context)) {
/* Program GRPH COMPRESSED ADDRESS and PITCH */
struct compr_addr_and_pitch_params params = {0, 0, 0};
struct compressor *compr = dc->fbc_compressor;
-   struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[0];
+   struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
 
-   params.source_view_width =
-   pipe_ctx->stream->timing.h_addressable;
-   params.source_view_height =
-   pipe_ctx->stream->timing.v_addressable;
+   params.source_view_width = 
pipe_ctx->stream->timing.h_addressable;
+   params.source_view_height = 
pipe_ctx->stream->timing.v_addressable;
 
compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
 
@@ -1751,7 +1745,6 @@ static enum dc_status enable_fbc(struct dc *dc,
 
compr->funcs->enable_fbc(compr, ¶ms);
}
-   return status;
 }
 #endif
 
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 18/30] drm/amd/display: reprogram surface config on scaling change

2017-12-13 Thread Harry Wentland
From: Eric Yang 

When plane size changes, we need to reprogram surface pitch in addition
to viewport and scaler. This change is a conservative way to make this happen.
However, it could be further optimized by moving pitch programming into
mem_program_viewport.

Signed-off-by: Eric Yang 
Reviewed-by: Andrew Jiang 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index ee057de68ed2..824de3630889 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1782,7 +1782,8 @@ static void update_dchubp_dpp(
plane_state->update_flags.bits.rotation_change ||
plane_state->update_flags.bits.swizzle_change ||
plane_state->update_flags.bits.dcc_change ||
-   plane_state->update_flags.bits.bpp_change) {
+   plane_state->update_flags.bits.bpp_change ||
+   plane_state->update_flags.bits.scaling_change) {
hubp->funcs->hubp_program_surface_config(
hubp,
plane_state->format,
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 14/30] drm/amd/display: fix global sync param retrieval when not pipe splitting

2017-12-13 Thread Harry Wentland
From: Dmytro Laktyushkin 

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c |  8 
 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 12 ++--
 drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h   |  6 +++---
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c 
b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
index 366aace8c323..5e2ea12fbb73 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -782,11 +782,11 @@ void mode_support_and_system_configuration(struct 
dcn_bw_internal_vars *v)
v->dst_y_after_scaler = 0.0;
}
v->time_calc = 24.0 / 
v->projected_dcfclk_deep_sleep;
-   v->v_update_offset[k] 
=dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+   v->v_update_offset[k][j] = 
dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
v->total_repeater_delay = 
v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) 
+ 3.0 / v->required_dispclk[i][j]);
-   v->v_update_width[k] = (14.0 / 
v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + 
v->total_repeater_delay) * v->pixel_clock[k];
-   v->v_ready_offset[k] =dcn_bw_max2(150.0 / 
(v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / 
v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) 
* v->pixel_clock[k];
-   v->time_setup = (v->v_update_offset[k] + 
v->v_update_width[k] + v->v_ready_offset[k]) / v->pixel_clock[k];
+   v->v_update_width[k][j] = (14.0 / 
v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + 
v->total_repeater_delay) * v->pixel_clock[k];
+   v->v_ready_offset[k][j] = dcn_bw_max2(150.0 / 
(v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / 
v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) 
* v->pixel_clock[k];
+   v->time_setup = (v->v_update_offset[k][j] + 
v->v_update_width[k][j] + v->v_ready_offset[k][j]) / v->pixel_clock[k];
v->extra_latency = 
v->urgent_round_trip_and_out_of_order_latency_per_state[i] + 
(v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + 
v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / 
v->return_bw_per_state[i];
if (v->pte_enable == dcn_bw_yes) {
v->extra_latency = v->extra_latency + 
v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / 
v->return_bw_per_state[i];
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 
b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index c3cfd48e0423..331891c2c71a 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1014,9 +1014,9 @@ bool dcn_validate_bandwidth(
if (pipe->top_pipe && pipe->top_pipe->plane_state == 
pipe->plane_state)
continue;
 
-   pipe->pipe_dlg_param.vupdate_width = 
v->v_update_width[input_idx];
-   pipe->pipe_dlg_param.vupdate_offset = 
v->v_update_offset[input_idx];
-   pipe->pipe_dlg_param.vready_offset = 
v->v_ready_offset[input_idx];
+   pipe->pipe_dlg_param.vupdate_width = 
v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+   pipe->pipe_dlg_param.vupdate_offset = 
v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+   pipe->pipe_dlg_param.vready_offset = 
v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
pipe->pipe_dlg_param.vstartup_start = 
v->v_startup[input_idx];
 
pipe->pipe_dlg_param.htotal = 
pipe->stream->timing.h_total;
@@ -1055,9 +1055,9 @@ bool dcn_validate_bandwidth(
 TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
if (hsplit_pipe && 
hsplit_pipe->plane_state == pipe->plane_state) {
/* update previously split pipe 
*/
-   
hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
-   
hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
-   
hsplit_pipe->pipe_dlg_param.vready_offset = v->v_read

[PATCH 03/30] drm/amd/display: Error print when ATOM BIOS implementation is missing

2017-12-13 Thread Harry Wentland
We fail apply_ctx_to_hw when crtc_source_select is missing. This isn't
really helpful at this point. It would aid ASIC bringup if we log an error
when we can't find the implementation for the ATOM version.

Do the same for all other function points in the command table that do a
NULL check before being called.

Signed-off-by: Harry Wentland 
Reviewed-by: Roman Li 
Reviewed-by: Jordan Lazare 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/bios/command_table.c | 21 +
 .../gpu/drm/amd/display/dc/bios/command_table2.c| 13 +
 2 files changed, 34 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c 
b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 3f7b2dabc2b0..1aefed8cf98b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -387,6 +387,7 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
break;
default:
+   dm_error("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = NULL;
break;
}
@@ -910,6 +911,8 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
+   dm_error("Don't have set_pixel_clock for v%d\n",
+BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
bp->cmd_tbl.set_pixel_clock = NULL;
break;
}
@@ -1227,6 +1230,8 @@ static void init_enable_spread_spectrum_on_ppll(struct 
bios_parser *bp)
enable_spread_spectrum_on_ppll_v3;
break;
default:
+   dm_error("Don't have enable_spread_spectrum_on_ppll for v%d\n",
+
BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL));
bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
break;
}
@@ -1422,6 +1427,8 @@ static void init_adjust_display_pll(struct bios_parser 
*bp)
bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
break;
default:
+   dm_error("Don't have adjust_display_pll for v%d\n",
+BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll));
bp->cmd_tbl.adjust_display_pll = NULL;
break;
}
@@ -1695,6 +1702,8 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
+   dm_error("Don't have set_crtc_timing for dtd v%d\n",
+dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -1704,6 +1713,8 @@ static void init_set_crtc_timing(struct bios_parser *bp)
bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
break;
default:
+   dm_error("Don't have set_crtc_timing for v%d\n",
+BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing));
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -1890,6 +1901,8 @@ static void init_select_crtc_source(struct bios_parser 
*bp)
bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
break;
default:
+   dm_error("Don't select_crtc_source enable_crtc for v%d\n",
+BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source));
bp->cmd_tbl.select_crtc_source = NULL;
break;
}
@@ -1997,6 +2010,8 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
+   dm_error("Don't have enable_crtc for v%d\n",
+BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -2103,6 +2118,8 @@ static void init_program_clock(struct bios_parser *bp)
bp->cmd_tbl.program_clock = program_clock_v6;
break;
default:
+   dm_error("Don't have program_clock for v%d\n",
+BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
bp->cmd_tbl.program_clock = NULL;
break;
}
@@ -2324,6 +2341,8 @@ static void init_enable_disp_power_gating(
enable_disp_power_gating_v2_1;
break;
default:
+   dm_error("Don't enable_disp_power_gating enable_crtc for v%d\n",
+BIOS_CMD_TABLE_PARA_REVISION(Ena

[PATCH 11/30] drm/amd/display: clean up dcn soc params

2017-12-13 Thread Harry Wentland
From: Dmytro Laktyushkin 

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c   | 29 --
 .../gpu/drm/amd/display/dc/dml/display_mode_lib.c  | 29 --
 .../drm/amd/display/dc/dml/display_mode_structs.h  |  4 ---
 3 files changed, 62 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 
b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 47dbc953a3a9..c3cfd48e0423 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1585,35 +1585,6 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)

dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one,

dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed,
dc->dcn_ip->dcfclk_cstate_latency);
-   dc->dml.soc.vmin.socclk_mhz = dc->dcn_soc->socclk;
-   dc->dml.soc.vmid.socclk_mhz = dc->dcn_soc->socclk;
-   dc->dml.soc.vnom.socclk_mhz = dc->dcn_soc->socclk;
-   dc->dml.soc.vmax.socclk_mhz = dc->dcn_soc->socclk;
-
-   dc->dml.soc.vmin.dcfclk_mhz = dc->dcn_soc->dcfclkv_min0p65;
-   dc->dml.soc.vmid.dcfclk_mhz = dc->dcn_soc->dcfclkv_mid0p72;
-   dc->dml.soc.vnom.dcfclk_mhz = dc->dcn_soc->dcfclkv_nom0p8;
-   dc->dml.soc.vmax.dcfclk_mhz = dc->dcn_soc->dcfclkv_max0p9;
-
-   dc->dml.soc.vmin.dispclk_mhz = dc->dcn_soc->max_dispclk_vmin0p65;
-   dc->dml.soc.vmid.dispclk_mhz = dc->dcn_soc->max_dispclk_vmid0p72;
-   dc->dml.soc.vnom.dispclk_mhz = dc->dcn_soc->max_dispclk_vnom0p8;
-   dc->dml.soc.vmax.dispclk_mhz = dc->dcn_soc->max_dispclk_vmax0p9;
-
-   dc->dml.soc.vmin.dppclk_mhz = dc->dcn_soc->max_dppclk_vmin0p65;
-   dc->dml.soc.vmid.dppclk_mhz = dc->dcn_soc->max_dppclk_vmid0p72;
-   dc->dml.soc.vnom.dppclk_mhz = dc->dcn_soc->max_dppclk_vnom0p8;
-   dc->dml.soc.vmax.dppclk_mhz = dc->dcn_soc->max_dppclk_vmax0p9;
-
-   dc->dml.soc.vmin.phyclk_mhz = dc->dcn_soc->phyclkv_min0p65;
-   dc->dml.soc.vmid.phyclk_mhz = dc->dcn_soc->phyclkv_mid0p72;
-   dc->dml.soc.vnom.phyclk_mhz = dc->dcn_soc->phyclkv_nom0p8;
-   dc->dml.soc.vmax.phyclk_mhz = dc->dcn_soc->phyclkv_max0p9;
-
-   dc->dml.soc.vmin.dram_bw_per_chan_gbps = 
dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
-   dc->dml.soc.vmid.dram_bw_per_chan_gbps = 
dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
-   dc->dml.soc.vnom.dram_bw_per_chan_gbps = 
dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
-   dc->dml.soc.vmax.dram_bw_per_chan_gbps = 
dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
 
dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
dc->dml.soc.sr_enter_plus_exit_time_us = 
dc->dcn_soc->sr_enter_plus_exit_time;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 4c31fa54af39..c109b2c34c8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -35,35 +35,6 @@ static void set_soc_bounding_box(struct 
_vcs_dpi_soc_bounding_box_st *soc, enum
soc->writeback_latency_us = 12.0;
soc->ideal_dram_bw_after_urgent_percent = 80.0;
soc->max_request_size_bytes = 256;
-
-   soc->vmin.dcfclk_mhz = 300.0;
-   soc->vmin.dispclk_mhz = 608.0;
-   soc->vmin.dppclk_mhz = 435.0;
-   soc->vmin.dram_bw_per_chan_gbps = 12.8;
-   soc->vmin.phyclk_mhz = 540.0;
-   soc->vmin.socclk_mhz = 208.0;
-
-   soc->vmid.dcfclk_mhz = 600.0;
-   soc->vmid.dispclk_mhz = 661.0;
-   soc->vmid.dppclk_mhz = 661.0;
-   soc->vmid.dram_bw_per_chan_gbps = 12.8;
-   soc->vmid.phyclk_mhz = 540.0;
-   soc->vmid.socclk_mhz = 208.0;
-
-   soc->vnom.dcfclk_mhz = 600.0;
-   soc->vnom.dispclk_mhz = 661.0;
-   soc->vnom.dppclk_mhz = 661.0;
-   soc->vnom.dram_bw_per_chan_gbps = 38.4;
-   soc->vnom.phyclk_mhz = 810;
-   soc->vnom.socclk_mhz = 208.0;
-
-   soc->vmax.dcfclk_mhz = 600.0;
-   soc->vmax.dispclk_mhz = 1086.0;
-   soc->vmax.dppclk_mhz = 661.0;
-   soc->vmax.dram_bw_per_chan_gbps = 38.4;
-   soc->vmax.phyclk_mhz = 810.0;
-   soc->vmax.socclk_mhz = 208.0;
-
soc->downspread_percent = 0.5;
soc->dram_page_open_time_ns = 50.0;
soc->dram_rw_turnaround_time_ns = 17.5;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 2d9d6298f0d3..aeebd8bee628 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/

[PATCH 04/30] drm/amd/display: Don't spam debug log on long reg waits

2017-12-13 Thread Harry Wentland
Certain reg waits take up to a frame. Don't spam the log when this
happens.

Signed-off-by: Harry Wentland 
Reviewed-by: Jordan Lazare 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/os_types.h | 6 +-
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h 
b/drivers/gpu/drm/amd/display/dc/os_types.h
index 68ce2ab8f455..1fcbc99e63b5 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -44,14 +44,10 @@
 #undef WRITE
 #undef FRAME_SIZE
 
-#define dm_output_to_console(fmt, ...) DRM_INFO(fmt, ##__VA_ARGS__)
+#define dm_output_to_console(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
 
 #define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
 
-#define dm_debug(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
-
-#define dm_vlog(fmt, args) vprintk(fmt, args)
-
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 #include 
 #endif
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 00/30] DC Patches Dec 13, 2017

2017-12-13 Thread Harry Wentland
 * Fix MST headless hotplug
 * Add bunch of error prints for missing BIOS implementations
 * Remove log spam for long reg waits
 * Bunch of DCN fixes and cleanup of HW programming code

Andrew Jiang (1):
  drm/amd/display: dal 3.1.27

Anthony Koo (1):
  drm/amd/display: Fix check for whether dmcu fw is running

Dmytro Laktyushkin (4):
  drm/amd/display: clean up dcn soc params
  drm/amd/display: fix rotated surface scaling
  drm/amd/display: fix global sync param retrieval when not pipe
splitting
  drm/amd/display: fix 180 full screen pipe split

Eric Bernstein (5):
  drm/amd/display: Update HUBP
  drm/amd/display: Remove dwbc from pipe_ctx
  drm/amd/display: Clean up DCN cursor code
  drm/amd/display: Put dcn_mi_registers with other structs
  drm/amd/display: Update FMT and OPPBUF functions

Eric Yang (2):
  drm/amd/display: dal 3.1.26
  drm/amd/display: reprogram surface config on scaling change

Harry Wentland (5):
  drm/amd/display: Print type if we get wrong ObjectID from bios
  drm/amd/display: Remove dead enable_plane function definition and call
  drm/amd/display: Error print when ATOM BIOS implementation is missing
  drm/amd/display: Don't spam debug log on long reg waits
  drm/amd/display: Call validate_fbc should_enable_fbc

Hugo Hu (1):
  drm/amd/display: Use the maximum link setting which EDP reported.

Jerry (Fangzhi) Zuo (1):
  drm/amd/display: Fix rehook MST display not light back on

Leo (Sunpeng) Li (2):
  drm/amd/display: Do DC mode-change check after stream creation
  drm/amd/display: Fix unused variable warnings.

Vitaly Prosyak (2):
  drm/amd/display: Define BLNDGAM_CONFIG_STATUS
  drm/amd/display: Declare and share color space types for dcn's

Yongqiang Sun (1):
  drm/amd/display: Add hdr_supported flag

Yue Hin Lau (5):
  drm/amd/display: integrating optc pseudocode
  drm/amd/display: hubp refactor
  drm/amd/display: Only blank DCN when we have set_blank implementation
  drm/amd/display: check for null before calling is_blanked
  drm/amd/display: Expose dpp1_set_cursor_attributes

 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |  28 +--
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h  |   2 +
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c|  51 +
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.h|   1 +
 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c  |  14 +-
 .../gpu/drm/amd/display/dc/bios/command_table.c|  21 ++
 .../gpu/drm/amd/display/dc/bios/command_table2.c   |  13 ++
 .../gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c   |   8 +-
 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c   |  41 +---
 drivers/gpu/drm/amd/display/dc/core/dc.c   |   4 +-
 .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c  | 145 +
 drivers/gpu/drm/amd/display/dc/core/dc_link.c  |  16 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c   |   6 +
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c  |  63 +++---
 drivers/gpu/drm/amd/display/dc/dc.h|   2 +-
 drivers/gpu/drm/amd/display/dc/dc_types.h  |   1 +
 drivers/gpu/drm/amd/display/dc/dce/dce_abm.c   |  18 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_abm.h   |   8 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c|   4 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c  |  34 +++-
 drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h  |   3 +
 .../amd/display/dc/dce110/dce110_hw_sequencer.c|  42 ++--
 drivers/gpu/drm/amd/display/dc/dcn10/Makefile  |   2 +-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h   |   8 +-
 .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c|  65 ++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c  |  74 +++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h  | 224 +++--
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  |  56 +++---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c   |  72 ++-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h   |  43 +++-
 .../{dcn10_timing_generator.c => dcn10_optc.c} |  22 +-
 .../{dcn10_timing_generator.h => dcn10_optc.h} |  27 ++-
 .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  |   2 +-
 .../gpu/drm/amd/display/dc/dml/display_mode_lib.c  |  29 ---
 .../drm/amd/display/dc/dml/display_mode_structs.h  |   4 -
 drivers/gpu/drm/amd/display/dc/inc/core_types.h|   1 -
 drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h |   6 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/abm.h|   4 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h   |   1 +
 drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h|   2 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h   |  14 ++
 drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h  |  21 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/opp.h|  38 ++--
 .../drm/amd/display/dc/inc/hw/timing_generator.h   |  19 ++
 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h  |   9 +-
 drivers/gpu/drm/amd/display/dc/os_types.h  |   6 +-
 46 files changed, 743 insertions(+), 531 deletions(-)
 rename drivers/gpu/drm/amd/displa

[PATCH 12/30] drm/amd/display: fix rotated surface scaling

2017-12-13 Thread Harry Wentland
From: Dmytro Laktyushkin 

This is a resubmit with the errors fixed

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 61 +++
 1 file changed, 29 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ae9312df0a1c..bc1b5f42a0a4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -527,12 +527,7 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, 
struct view *recout_skip
pipe_ctx->bottom_pipe->plane_state == 
pipe_ctx->plane_state;
bool sec_split = pipe_ctx->top_pipe &&
pipe_ctx->top_pipe->plane_state == 
pipe_ctx->plane_state;
-
-   if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
-   stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
-   pri_split = false;
-   sec_split = false;
-   }
+   bool top_bottom_split = stream->view_format == 
VIEW_3D_FORMAT_TOP_AND_BOTTOM;
 
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
@@ -567,17 +562,15 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, 
struct view *recout_skip
- 
pipe_ctx->plane_res.scl_data.recout.y;
 
/* Handle h & vsplit */
-   if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
-   pipe_ctx->plane_state && stream->view_format == 
VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
-   pipe_ctx->plane_res.scl_data.recout.y += 
pipe_ctx->plane_res.scl_data.recout.height / 2;
+   if (sec_split && top_bottom_split) {
+   pipe_ctx->plane_res.scl_data.recout.y +=
+   pipe_ctx->plane_res.scl_data.recout.height / 2;
/* Floor primary pipe, ceil 2ndary pipe */
-   pipe_ctx->plane_res.scl_data.recout.height = 
(pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
-   } else if (pipe_ctx->bottom_pipe &&
-   pipe_ctx->bottom_pipe->plane_state == 
pipe_ctx->plane_state
-   && stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+   pipe_ctx->plane_res.scl_data.recout.height =
+   (pipe_ctx->plane_res.scl_data.recout.height + 
1) / 2;
+   } else if (pri_split && top_bottom_split)
pipe_ctx->plane_res.scl_data.recout.height /= 2;
-
-   if (pri_split || sec_split) {
+   else if (pri_split || sec_split) {
/* HMirror XOR Secondary_pipe XOR Rotation_180 */
bool right_view = (sec_split != plane_state->horizontal_mirror) 
!=
(plane_state->rotation == 
ROTATION_ANGLE_180);
@@ -601,32 +594,17 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, 
struct view *recout_skip
 *  * 1/ stream scaling ratio) - (surf 
surf_src offset * 1/ full scl
 *  ratio)
 */
-   recout_full_x = stream->dst.x + (plane_state->dst_rect.x -  
stream->src.x)
+   recout_full_x = stream->dst.x + (plane_state->dst_rect.x - 
stream->src.x)
* stream->dst.width / stream->src.width 
-
surf_src.x * plane_state->dst_rect.width / 
surf_src.width
* stream->dst.width / stream->src.width;
-   recout_full_y = stream->dst.y + (plane_state->dst_rect.y -  
stream->src.y)
+   recout_full_y = stream->dst.y + (plane_state->dst_rect.y - 
stream->src.y)
* stream->dst.height / 
stream->src.height -
surf_src.y * plane_state->dst_rect.height / 
surf_src.height
* stream->dst.height / 
stream->src.height;
 
recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - 
recout_full_x;
recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - 
recout_full_y;
-
-   /*Adjust recout_skip for rotation */
-   if ((pri_split || sec_split) && (plane_state->rotation == 
ROTATION_ANGLE_270 || plane_state->rotation == ROTATION_ANGLE_180)) {
-   bool right_view = (sec_split != plane_state->horizontal_mirror) 
!=
-   (plane_state->rotation == 
ROTATION_ANGLE_180);
-
-   if (plane_state->rotation == ROTATION_ANGLE_90
-   || plane_state->rotation == ROTATION_ANGLE_270)
-   /* Secondary_pipe XOR Rotation_270 */
-   right_view = (plane_state->rotation == 
ROTATION_ANGLE_270) != sec_split;
-   if (!right_view)
-   recout_ski

[PATCH 10/30] drm/amd/display: dal 3.1.26

2017-12-13 Thread Harry Wentland
From: Eric Yang 

Signed-off-by: Eric Yang 
Reviewed-by: Andrew Jiang 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 3b49ca3027b6..1b1c7300dfc3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.25"
+#define DC_VER "3.1.26"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/2] drm/ttm: completely rework ttm_bo_delayed_delete

2017-12-13 Thread Thomas Hellstrom

Hi, Christian,

While this has probably already been committed and looks like a nice 
cleanup, there are two things below that I think need fixing.


On 11/15/2017 01:31 PM, Christian König wrote:

There is no guarantee that the next entry on the ddelete list stays on
the list when we drop the locks.

Completely rework this mess by moving processed entries on a temporary
list.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/ttm/ttm_bo.c | 77 ++--
  1 file changed, 25 insertions(+), 52 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7c1eac4f4b4b..ad0afdd71f21 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -572,71 +572,47 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object 
*bo,
   * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
   * encountered buffers.
   */
-
-static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
  {
struct ttm_bo_global *glob = bdev->glob;
-   struct ttm_buffer_object *entry = NULL;
-   int ret = 0;
-
-   spin_lock(&glob->lru_lock);
-   if (list_empty(&bdev->ddestroy))
-   goto out_unlock;
+   struct list_head removed;
+   bool empty;
  
-	entry = list_first_entry(&bdev->ddestroy,

-   struct ttm_buffer_object, ddestroy);
-   kref_get(&entry->list_kref);
+   INIT_LIST_HEAD(&removed);
  
-	for (;;) {

-   struct ttm_buffer_object *nentry = NULL;
-
-   if (entry->ddestroy.next != &bdev->ddestroy) {
-   nentry = list_first_entry(&entry->ddestroy,
-   struct ttm_buffer_object, ddestroy);
-   kref_get(&nentry->list_kref);
-   }
-
-   ret = reservation_object_trylock(entry->resv) ? 0 : -EBUSY;
-   if (remove_all && ret) {
-   spin_unlock(&glob->lru_lock);
-   ret = reservation_object_lock(entry->resv, NULL);
-   spin_lock(&glob->lru_lock);
-   }
+   spin_lock(&glob->lru_lock);
+   while (!list_empty(&bdev->ddestroy)) {
+   struct ttm_buffer_object *bo;
  
-		if (!ret)

-   ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
- true);
-   else
-   spin_unlock(&glob->lru_lock);
+   bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
+ ddestroy);
+   kref_get(&bo->list_kref);
+   list_move_tail(&bo->ddestroy, &removed);
+   spin_unlock(&glob->lru_lock);
  
-		kref_put(&entry->list_kref, ttm_bo_release_list);

-   entry = nentry;
+   reservation_object_lock(bo->resv, NULL);


Reservation may be a long lived lock, and typically if the object is 
reserved here, it's being evicted somewhere and there might be a 
substantial stall, which isn't really acceptable in the global 
workqueue. Better to move on to the next bo.
This function was really intended to be non-blocking, unless remove_all 
== true. I even think it's safe to keep the spinlock held on tryreserve?


  
-		if (ret || !entry)

-   goto out;
+   spin_lock(&glob->lru_lock);
+   ttm_bo_cleanup_refs(bo, false, !remove_all, true);
  
+		kref_put(&bo->list_kref, ttm_bo_release_list);


Calling a release function in atomic context is a bad thing. Nobody 
knows what locks needs to be taken in the release function and such code 
is prone to lock inversion and sleep-while-atomic bugs. Not long ago 
vfree() was even forbidden from atomic context. But here it's easily 
avoidable.


/Thomas


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: FW: [PATCH v2 2/2] drm/amdgpu: Move to gtt before cpu accesses dma buf.

2017-12-13 Thread Deucher, Alexander
Please send the drm prime patch to dri-devel if you didn't already.


Alex


From: amd-gfx  on behalf of Samuel Li 

Sent: Wednesday, December 13, 2017 2:17:49 PM
To: Koenig, Christian; amd-gfx@lists.freedesktop.org
Subject: Re: FW: [PATCH v2 2/2] drm/amdgpu: Move to gtt before cpu accesses dma 
buf.

For the record.


On 2017-12-13 01:26 PM, Christian König wrote:
> Actually we try to avoid that drivers define their own dma_buf_ops in DRM.
>
> That's why you have all those callbacks in drm_driver which just mirror the 
> dma_buf interface but unpack the GEM object from the dma-buf object.
>
> There are quite a number of exceptions, but those drivers then implement 
> everything on their own because the DRM marshaling doesn't make sense for 
> them.
>
> Christian.
>
> Am 13.12.2017 um 19:01 schrieb Samuel Li:
>> That is an approach. The cost is to add a new call back, which is not 
>> necessary though, since driver can always actually define their own 
>> dma_buf_ops.
>> The intention here is to allow a driver reuse drm_gem_prime_dmabuf_ops{}. If 
>> you would like to go this far, maybe a more straight forward way is to 
>> export those ops, e.g. drm_gem_map_attach, so that a driver can use them in 
>> its own definitions.
>>
>> Sam
>>
>>
>>
>> On 2017-12-13 05:23 AM, Christian König wrote:
>>> Something like the attached patch. Not even compile tested.
>>>
>>> Christian.
>>>
>>> Am 12.12.2017 um 20:13 schrieb Samuel Li:
 Not sure if I understand your comments correctly. Currently amdgpu prime 
 reuses drm_gem_prime_dmabuf_ops{}, and it is defined as static which is 
 reasonable. I do not see an easier way to introduce 
 amdgpu_gem_begin_cpu_access().

 Sam

 On 2017-12-12 01:30 PM, Christian König wrote:
>> +while (amdgpu_dmabuf_ops.begin_cpu_access != 
>> amdgpu_gem_begin_cpu_access)
> I would rather just add the four liner code to drm to forward the 
> begin_cpu_access callback into a drm_driver callback instead of all this.
>
> But apart from that it looks good to me.
>
> Christian.
>
> Am 12.12.2017 um 19:14 schrieb Li, Samuel:
>> A gentle ping on this one, Christian, can you take a look at this?
>>
>> Sam
>>
>> -Original Message-
>> From: Li, Samuel
>> Sent: Friday, December 08, 2017 5:22 PM
>> To: amd-gfx@lists.freedesktop.org
>> Cc: Li, Samuel 
>> Subject: [PATCH v2 2/2] drm/amdgpu: Move to gtt before cpu accesses dma 
>> buf.
>>
>> To improve cpu read performance. This is implemented for APUs currently.
>>
>> v2: Adapt to change 
>> https://lists.freedesktop.org/archives/amd-gfx/2017-October/015174.html
>>
>> Change-Id: I7a583e23a9ee706e0edd2a46f4e4186a609368e3
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu.h   |  2 ++
>> drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   |  2 +-
>> drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 58 
>> +++
>> 3 files changed, 61 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index f8657c3..193db70 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -417,6 +417,8 @@ amdgpu_gem_prime_import_sg_table(struct drm_device 
>> *dev,  struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
>> struct drm_gem_object *gobj,
>> int flags);
>> +struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
>> +struct dma_buf *dma_buf);
>> int amdgpu_gem_prime_pin(struct drm_gem_object *obj);  void 
>> amdgpu_gem_prime_unpin(struct drm_gem_object *obj);  struct 
>> reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); 
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> index 31383e0..df30b08 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> @@ -868,7 +868,7 @@ static struct drm_driver kms_driver = {
>> .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
>> .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
>> .gem_prime_export = amdgpu_gem_prime_export,
>> -.gem_prime_import = drm_gem_prime_import,
>> +.gem_prime_import = amdgpu_gem_prime_import,
>> .gem_prime_pin = amdgpu_gem_prime_pin,
>> .gem_prime_unpin = amdgpu_gem_prime_unpin,
>> .gem_prime_res_obj = amdgpu_gem_prime_res_obj, diff --git 
>> a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
>> index ae9c106..de6f599 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/a

[PATCH] drm/amdgpu: Simplify amdgpu_lockup_timeout usage.

2017-12-13 Thread Andrey Grodzovsky
With introduction of amdgpu_gpu_recovery we don't need any more
to rely on amdgpu_lockup_timeout == 0 for disabling GPU reset.

Signed-off-by: Andrey Grodzovsky 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  7 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c|  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 14 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c  |  2 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c  |  2 +-
 5 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a074502..98fb9f9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1229,6 +1229,11 @@ static void amdgpu_check_arguments(struct amdgpu_device 
*adev)
 amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
}
+
+   if (amdgpu_lockup_timeout == 0) {
+   dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 
1\n");
+   amdgpu_lockup_timeout = 1;
+   }
 }
 
 /**
@@ -2831,7 +2836,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return false;
 
-   return amdgpu_lockup_timeout > 0 ? true : false;
+   return amdgpu_gpu_recovery;
 }
 
 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b734cd6..1fc5499 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;
 int amdgpu_hw_i2c = 0;
 int amdgpu_pcie_gen2 = -1;
 int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 1;
 int amdgpu_dpm = -1;
 int amdgpu_fw_load_type = -1;
 int amdgpu_aspm = -1;
@@ -166,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
 MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(msi, amdgpu_msi, int, 0444);
 
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = 
disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 
1)");
 module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
 
 MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 854baf0..9484aed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -410,7 +410,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
  unsigned num_hw_submission)
 {
-   long timeout;
int r;
 
/* Check that num_hw_submission is a power of two */
@@ -434,20 +433,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
-   timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
-   if (timeout == 0) {
-   /*
-* FIXME:
-* Delayed workqueue cannot use it directly,
-* so the scheduler will not use delayed workqueue if
-* MAX_SCHEDULE_TIMEOUT is set.
-* Currently keep it simple and silly.
-*/
-   timeout = MAX_SCHEDULE_TIMEOUT;
-   }
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
   num_hw_submission, amdgpu_job_hang_limit,
-  timeout, ring->name);
+  msecs_to_jiffies(amdgpu_lockup_timeout), 
ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
  ring->name);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 
b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 7ade56d..43e74ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -277,7 +277,7 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device 
*adev,
int r;
 
/* trigger gpu-reset by hypervisor only if TDR disabled */
-   if (amdgpu_lockup_timeout == 0) {
+   if (!amdgpu_gpu_recovery) {
/* see what event we get */
r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c 
b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index e05823d..da7c261 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -545,7 +545,7 @@ static int xgpu_vi_mailbox_rcv_irq(st

Re: [PATCH 1/6] drm/ttm: add on_alloc_stage and reservation into ttm_operation_ctx

2017-12-13 Thread Thomas Hellstrom

Hi,

I think this series is quite poorly documented. We should have a log 
message explaining the purpose of the commit.
Also since it's not obvious what the series is attempting to achieve, 
please add a 0/X series header message.


/Thomas


On 12/12/2017 10:33 AM, Roger He wrote:

on_alloc_stage: is this operation on allocation stage
resv: reservation bo used of this operation

Change-Id: I01ea482e8c7470014196eb218e2ff8913306eef0
Signed-off-by: Roger He 
---
  include/drm/ttm/ttm_bo_api.h | 4 
  1 file changed, 4 insertions(+)

diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 368eb02..25de597 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -263,6 +263,8 @@ struct ttm_bo_kmap_obj {
   *
   * @interruptible: Sleep interruptible if sleeping.
   * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @on_alloc_stage: is this operation on allocation stage
+ * @resv: reservation bo used
   *
   * Context for TTM operations like changing buffer placement or general memory
   * allocation.
@@ -270,6 +272,8 @@ struct ttm_bo_kmap_obj {
  struct ttm_operation_ctx {
bool interruptible;
bool no_wait_gpu;
+   bool on_alloc_stage;
+   struct reservation_object *resv;
uint64_t bytes_moved;
  };
  



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: FW: [PATCH v2 2/2] drm/amdgpu: Move to gtt before cpu accesses dma buf.

2017-12-13 Thread Samuel Li
For the record.


On 2017-12-13 01:26 PM, Christian König wrote:
> Actually we try to avoid that drivers define their own dma_buf_ops in DRM.
> 
> That's why you have all those callbacks in drm_driver which just mirror the 
> dma_buf interface but unpack the GEM object from the dma-buf object.
> 
> There are quite a number of exceptions, but those drivers then implement 
> everything on their own because the DRM marshaling doesn't make sense for 
> them.
> 
> Christian.
> 
> Am 13.12.2017 um 19:01 schrieb Samuel Li:
>> That is an approach. The cost is to add a new call back, which is not 
>> necessary though, since driver can always actually define their own 
>> dma_buf_ops.
>> The intention here is to allow a driver reuse drm_gem_prime_dmabuf_ops{}. If 
>> you would like to go this far, maybe a more straight forward way is to 
>> export those ops, e.g. drm_gem_map_attach, so that a driver can use them in 
>> its own definitions.
>>
>> Sam
>>
>>
>>
>> On 2017-12-13 05:23 AM, Christian König wrote:
>>> Something like the attached patch. Not even compile tested.
>>>
>>> Christian.
>>>
>>> Am 12.12.2017 um 20:13 schrieb Samuel Li:
 Not sure if I understand your comments correctly. Currently amdgpu prime 
 reuses drm_gem_prime_dmabuf_ops{}, and it is defined as static which is 
 reasonable. I do not see an easier way to introduce 
 amdgpu_gem_begin_cpu_access().

 Sam

 On 2017-12-12 01:30 PM, Christian König wrote:
>> +    while (amdgpu_dmabuf_ops.begin_cpu_access != 
>> amdgpu_gem_begin_cpu_access)
> I would rather just add the four liner code to drm to forward the 
> begin_cpu_access callback into a drm_driver callback instead of all this.
>
> But apart from that it looks good to me.
>
> Christian.
>
> Am 12.12.2017 um 19:14 schrieb Li, Samuel:
>> A gentle ping on this one, Christian, can you take a look at this?
>>
>> Sam
>>
>> -Original Message-
>> From: Li, Samuel
>> Sent: Friday, December 08, 2017 5:22 PM
>> To: amd-gfx@lists.freedesktop.org
>> Cc: Li, Samuel 
>> Subject: [PATCH v2 2/2] drm/amdgpu: Move to gtt before cpu accesses dma 
>> buf.
>>
>> To improve cpu read performance. This is implemented for APUs currently.
>>
>> v2: Adapt to change 
>> https://lists.freedesktop.org/archives/amd-gfx/2017-October/015174.html
>>
>> Change-Id: I7a583e23a9ee706e0edd2a46f4e4186a609368e3
>> ---
>>     drivers/gpu/drm/amd/amdgpu/amdgpu.h   |  2 ++
>>     drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   |  2 +-
>>     drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 58 
>> +++
>>     3 files changed, 61 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index f8657c3..193db70 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -417,6 +417,8 @@ amdgpu_gem_prime_import_sg_table(struct drm_device 
>> *dev,  struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
>>     struct drm_gem_object *gobj,
>>     int flags);
>> +struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
>> +    struct dma_buf *dma_buf);
>>     int amdgpu_gem_prime_pin(struct drm_gem_object *obj);  void 
>> amdgpu_gem_prime_unpin(struct drm_gem_object *obj);  struct 
>> reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); 
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> index 31383e0..df30b08 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> @@ -868,7 +868,7 @@ static struct drm_driver kms_driver = {
>>     .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
>>     .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
>>     .gem_prime_export = amdgpu_gem_prime_export,
>> -    .gem_prime_import = drm_gem_prime_import,
>> +    .gem_prime_import = amdgpu_gem_prime_import,
>>     .gem_prime_pin = amdgpu_gem_prime_pin,
>>     .gem_prime_unpin = amdgpu_gem_prime_unpin,
>>     .gem_prime_res_obj = amdgpu_gem_prime_res_obj, diff --git 
>> a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
>> index ae9c106..de6f599 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
>> @@ -26,6 +26,7 @@
>>     #include 
>>       #include "amdgpu.h"
>> +#include "amdgpu_display.h"
>>     #include 
>>     #include 
>>     @@ -164,6 +165,33 @@ struct reservation_object 
>> *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
>>     return bo->tbo.resv;

[PATCH v2] drm/amdgpu: Add gpu_recovery parameter

2017-12-13 Thread Andrey Grodzovsky
Add new parameter to control GPU recovery procedure.
Retire old way of disabling GPU recovery by setting lockup_timeout == 0 and
set default for lockup_timeout to 10s.

v2:
Add auto logic where reset is disabled for bare metal and enabled
for SR-IOV.
Allow forced reset from debugfs.

Signed-off-by: Andrey Grodzovsky 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h| 3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c| 4 
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c| 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c| 2 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c  | 2 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c  | 2 +-
 8 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3735500..d7f0263 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -126,6 +126,7 @@ extern int amdgpu_param_buf_per_se;
 extern int amdgpu_job_hang_limit;
 extern int amdgpu_lbpw;
 extern int amdgpu_compute_multipipe;
+extern int amdgpu_gpu_recovery;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
@@ -1879,7 +1880,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_psp_check_fw_loading_status(adev, i) 
(adev)->firmware.funcs->check_fw_loading_status((adev), (i))
 
 /* Common functions */
-int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job);
+int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job, 
bool force);
 bool amdgpu_need_backup(struct amdgpu_device *adev);
 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_need_post(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8d03baa..a074502 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3015,11 +3015,12 @@ static int amdgpu_reset_sriov(struct amdgpu_device 
*adev, uint64_t *reset_flags,
  *
  * @adev: amdgpu device pointer
  * @job: which job trigger hang
+ * @force: forces reset regardless of amdgpu_gpu_recovery
  *
  * Attempt to reset the GPU if it has hung (all asics).
  * Returns 0 for success or an error on failure.
  */
-int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
+int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, 
bool force)
 {
struct drm_atomic_state *state = NULL;
uint64_t reset_flags = 0;
@@ -3030,6 +3031,12 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, 
struct amdgpu_job *job)
return 0;
}
 
+   if (!force && (amdgpu_gpu_recovery == 0 ||
+   (amdgpu_gpu_recovery == -1  && 
!amdgpu_sriov_vf(adev {
+   DRM_INFO("GPU recovery disabled.\n");
+   return 0;
+   }
+
dev_info(adev->dev, "GPU reset begin!\n");
 
mutex_lock(&adev->lock_reset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0b039bd..b734cd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0;
 int amdgpu_job_hang_limit = 0;
 int amdgpu_lbpw = -1;
 int amdgpu_compute_multipipe = -1;
+int amdgpu_gpu_recovery = -1; /* auto */
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
 MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across 
pipes (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
 
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 
= disable, -1 = auto");
+module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
 #ifdef CONFIG_DRM_AMDGPU_SI
 
 #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1469963..854baf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -705,7 +705,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, 
void *data)
struct amdgpu_device *adev = dev->dev_private;
 
seq_printf(m, "gpu recover\n");
-   amdgpu_gpu_recover(adev, NULL);
+   amdgpu_gpu_recover(adev, NULL, true);
 
return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index c340774..c43643e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct 
*work)
  

Re: [PATCH] drm/amdgpu: add enumerate for PDB/PTB

2017-12-13 Thread Christian König

Am 13.12.2017 um 08:19 schrieb Chunming Zhou:

Change-Id: Ic1f39d3bc853e9e4259d3e03a22920eda822eec5
Signed-off-by: Chunming Zhou 


You dropped reversing the ordering and replaced that with noting the 
root level separately? Nifty idea.


Just please drop AMDGPU_VM_SUBPTB, translate further is something we 
hopefully will only use the first and last time for Raven.


So I would like to keep that completely transparent to the VM code and 
do the patching in the GMC specific implementation for Raven.


Christian.


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 69 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 14 +++
  2 files changed, 66 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 709587d8a77f..fc858ddf9319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -148,12 +148,29 @@ struct amdgpu_prt_cb {
  static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
  unsigned level)
  {
-   if (level != adev->vm_manager.num_level)
-   return 9 * (adev->vm_manager.num_level - level - 1) +
+   unsigned shift = 0xff;
+
+   switch (level) {
+   case AMDGPU_VM_PDB2:
+   case AMDGPU_VM_PDB1:
+   case AMDGPU_VM_PDB0:
+   shift = 9 * (adev->vm_manager.last_level - level - 1) +
adev->vm_manager.block_size;
-   else
-   /* For the page tables on the leaves */
-   return 0;
+   break;
+   case AMDGPU_VM_PTB:
+   if (adev->vm_manager.last_level == AMDGPU_VM_PTB)
+   shift = 0;
+   else
+   shift = adev->vm_manager.block_size;
+   break;
+   case AMDGPU_VM_SUBPTB:
+   shift = 0;
+   break;
+   default:
+   dev_err(adev->dev, "the level%d isn't supported.\n", level);
+   }
+
+   return shift;
  }
  
  /**

@@ -166,12 +183,13 @@ static unsigned amdgpu_vm_level_shift(struct 
amdgpu_device *adev,
  static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
  unsigned level)
  {
-   unsigned shift = amdgpu_vm_level_shift(adev, 0);
+   unsigned shift = amdgpu_vm_level_shift(adev,
+  adev->vm_manager.root_level);
  
-	if (level == 0)

+   if (level == adev->vm_manager.root_level)
/* For the root directory */
return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
-   else if (level != adev->vm_manager.num_level)
+   else if (level != adev->vm_manager.last_level)
/* Everything in between */
return 512;
else
@@ -385,7 +403,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device 
*adev,
spin_unlock(&vm->status_lock);
}
  
-		if (level < adev->vm_manager.num_level) {

+   if (level < adev->vm_manager.last_level) {
uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
((1 << shift) - 1);
@@ -431,7 +449,8 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
  
-	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);

+   return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
+ adev->vm_manager.root_level);
  }
  
  /**

@@ -1247,7 +1266,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device 
*adev,
return 0;
  
  error:

-   amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
+   amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+  adev->vm_manager.root_level);
amdgpu_job_free(job);
return r;
  }
@@ -1266,7 +1286,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params 
*p, uint64_t addr,
 struct amdgpu_vm_pt **entry,
 struct amdgpu_vm_pt **parent)
  {
-   unsigned level = 0;
+   unsigned level = p->adev->vm_manager.root_level;
  
  	*parent = NULL;

*entry = &p->vm->root;
@@ -1278,7 +1298,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params 
*p, uint64_t addr,
addr &= (1ULL << shift) - 1;
}
  
-	if (level != p->adev->vm_manager.num_level)

+   if (level != p->adev->vm_manager.last_level)
*entry = NULL;
  }
  
@@ -1320,7 +1340,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,

return;
entry->huge = !!(flags & AMDGPU_PDE_PTE);
  
-	amdgpu_gart_get_vm_pde(p->adev, p->adev->vm_manager.num_level - 1,

+   amdgpu_gart_get_vm_pde(p->adev, p->adev->vm_m

Re: [PATCH] drm/amdgpu: Add gpu_recovery parameter

2017-12-13 Thread Christian König

Am 13.12.2017 um 13:53 schrieb Andrey Grodzovsky:



On 12/13/2017 07:20 AM, Christian König wrote:

Am 12.12.2017 um 20:16 schrieb Andrey Grodzovsky:

Add new parameter to control GPU recovery procedure.
Retire old way of disabling GPU recovery by setting lockup_timeout 
== 0 and

set default for lockup_timeout to 10s.

Signed-off-by: Andrey Grodzovsky 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h    | 1 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c    | 8 ++--
  3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index 3735500..26abe03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -126,6 +126,7 @@ extern int amdgpu_param_buf_per_se;
  extern int amdgpu_job_hang_limit;
  extern int amdgpu_lbpw;
  extern int amdgpu_compute_multipipe;
+extern int amdgpu_gpu_recovery;
    #ifdef CONFIG_DRM_AMDGPU_SI
  extern int amdgpu_si_support;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 8d03baa..d84b57a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3030,6 +3030,11 @@ int amdgpu_gpu_recover(struct amdgpu_device 
*adev, struct amdgpu_job *job)

  return 0;
  }
  +    if (!amdgpu_gpu_recovery) {
+    DRM_INFO("GPU recovery disabled.\n");
+    return 0;
+    }
+


Please move this check into the caller of amdgpu_gpu_recover().

This way we can still trigger a GPU recovery manually or from the 
hypervisor under SRIOV.


Christian.


Problem with this is that amdgpu_check_soft_reset will not be called, 
this function which prints which IP block was hung even when later we 
opt not to recover.
I suggest instead to add a bool force_reset parameter to 
amdgpu_gpu_recover which will override amdgpu_gpu_recovery and we can 
set it to true from amdgpu_debugfs_gpu_recover only.


Good point and the solution sounds good to me as well.

Please go ahead with that,
Christian.



Thanks,
Andrey




  dev_info(adev->dev, "GPU reset begin!\n");
    mutex_lock(&adev->lock_reset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

index 0b039bd..5c612e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;
  int amdgpu_hw_i2c = 0;
  int amdgpu_pcie_gen2 = -1;
  int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 1;
  int amdgpu_dpm = -1;
  int amdgpu_fw_load_type = -1;
  int amdgpu_aspm = -1;
@@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0;
  int amdgpu_job_hang_limit = 0;
  int amdgpu_lbpw = -1;
  int amdgpu_compute_multipipe = -1;
+int amdgpu_gpu_recovery = 1;
    MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in 
megabytes");

  module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -165,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, 
int, 0444);
  MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = 
auto)");

  module_param_named(msi, amdgpu_msi, int, 0444);
  -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms 
(default 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 
1)");

  module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
    MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 
= auto)");

@@ -280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
  MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be 
spread across pipes (1 = enable, 0 = disable, -1 = auto)");
  module_param_named(compute_multipipe, amdgpu_compute_multipipe, 
int, 0444);
  +MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 
= enable (default) , 0 = disable");

+module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
  #ifdef CONFIG_DRM_AMDGPU_SI
    #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/pp: need to notify umd the pstate clock.

2017-12-13 Thread Alex Deucher
On Wed, Dec 13, 2017 at 4:52 AM, Rex Zhu  wrote:
> Change-Id: I344731cc6398c40976e08a125808bbfa85cb59a3
> Signed-off-by: Rex Zhu 

Please include a better patch description.  Something like:
Flag the stable pstate clocks in sysfs so userspace knows what clocks
are in use when stable pstate is selected for profiling.

We also probably need to handles APUs.

Alex

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   | 43 
> +-
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 36 ++
>  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  |  2 +
>  3 files changed, 56 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> index 8edb0c4..ecf9449 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> @@ -2590,8 +2590,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr 
> *hwmgr, enum amd_dpm_forced_le
> break;
> }
> }
> -   if (count < 0 || level == 
> AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
> +   if (count < 0 || level == 
> AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
> *sclk_mask = 0;
> +   tmp_sclk = 
> table_info->vdd_dep_on_sclk->entries[0].clk;
> +   }
>
> if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
> *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
> @@ -2603,6 +2605,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr 
> *hwmgr, enum amd_dpm_forced_le
> *mclk_mask = golden_dpm_table->mclk_table.count - 1;
>
> *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
> +
> +   hwmgr->pstate_sclk = tmp_sclk;
> +   hwmgr->pstate_mclk = tmp_mclk;
> +
> return 0;
>  }
>
> @@ -2614,6 +2620,10 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
> uint32_t mclk_mask = 0;
> uint32_t pcie_mask = 0;
>
> +   ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, 
> &pcie_mask);
> +   if (ret)
> +   return ret;
> +
> switch (level) {
> case AMD_DPM_FORCED_LEVEL_HIGH:
> ret = smu7_force_dpm_highest(hwmgr);
> @@ -2628,9 +2638,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
> case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
> case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
> case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
> -   ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, 
> &mclk_mask, &pcie_mask);
> -   if (ret)
> -   return ret;
> smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
> smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
> smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
> @@ -4292,7 +4299,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr 
> *hwmgr,
> struct smu7_single_dpm_table *sclk_table = 
> &(data->dpm_table.sclk_table);
> struct smu7_single_dpm_table *mclk_table = 
> &(data->dpm_table.mclk_table);
> struct smu7_single_dpm_table *pcie_table = 
> &(data->dpm_table.pcie_speed_table);
> -   int i, now, size = 0;
> +   int i, now, p, size = 0;
> uint32_t clock, pcie_speed;
>
> switch (type) {
> @@ -4301,32 +4308,34 @@ static int smu7_print_clock_levels(struct pp_hwmgr 
> *hwmgr,
> clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
>
> for (i = 0; i < sclk_table->count; i++) {
> -   if (clock > sclk_table->dpm_levels[i].value)
> -   continue;
> -   break;
> +   if (hwmgr->pstate_sclk == 
> sclk_table->dpm_levels[i].value)
> +   p = i;
> +   if (clock <= sclk_table->dpm_levels[i].value)
> +   now = i;
> }
> -   now = i;
>
> for (i = 0; i < sclk_table->count; i++)
> -   size += sprintf(buf + size, "%d: %uMhz %s\n",
> +   size += sprintf(buf + size, "%d: %uMhz %s %s\n",
> i, sclk_table->dpm_levels[i].value / 
> 100,
> -   (i == now) ? "*" : "");
> +   (i == now) ? "*" : "",
> +   (i == p) ? "P" : "");
> break;
> case PP_MCLK:
> smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
> clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
>
> for (i = 0; i < mclk_table->count; i++) {
> -   if (clock > mclk_table->dpm_levels[i].value)
> -   continue;
> -   break;

Re: [PATCH] drm/amdgpu: Fix no irq process when evict vram

2017-12-13 Thread Deucher, Alexander
This seems like a good thing to do in general even with Monk's fix.


Acked-by: Alex Deucher 


From: amd-gfx  on behalf of Yintian Tao 

Sent: Tuesday, December 12, 2017 10:37:43 PM
To: amd-gfx@lists.freedesktop.org
Cc: Tao, Yintian
Subject: [PATCH] drm/amdgpu: Fix no irq process when evict vram

When unload amdgpu driver we use sdma to evict vram but there is no
irq process after sdma completed work which raises that waiting for the
fence costs 2s which will trigger VFLR under SRIOV and at last make
unload driver failed. The reason is that the shutdown variable in adev
is set to true before evicting vram, which causes the ISR to return directly
without processing. Therefore, we need to set the variable after evicting vram.

Change-Id: I7bf75481aa0744b99c41672b49670adc70b478bd
Signed-off-by: Yintian Tao 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a269bbc..80934ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2458,7 +2458,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 int r;

 DRM_INFO("amdgpu: finishing device.\n");
-   adev->shutdown = true;
 if (adev->mode_info.mode_config_initialized)
 drm_crtc_force_disable_all(adev->ddev);

@@ -2466,6 +2465,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 amdgpu_fence_driver_fini(adev);
 amdgpu_fbdev_fini(adev);
 r = amdgpu_fini(adev);
+   adev->shutdown = true;
 if (adev->firmware.gpu_info_fw) {
 release_firmware(adev->firmware.gpu_info_fw);
 adev->firmware.gpu_info_fw = NULL;
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/pp: reset dpm level when adjust power state

2017-12-13 Thread Deucher, Alexander
Acked-by: Alex Deucher 


From: amd-gfx  on behalf of Rex Zhu 

Sent: Wednesday, December 13, 2017 4:52:48 AM
To: amd-gfx@lists.freedesktop.org
Cc: Zhu, Rex
Subject: [PATCH] drm/amd/pp: reset dpm level when adjust power state

Change-Id: I312d1cf7b964d25d698b02800ce7cd06ac33b28f
Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index ffa44bb..ab852b2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -244,7 +244,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, 
bool skip,
 }

 phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
-
+   phm_force_dpm_levels(hwmgr, hwmgr->dpm_level);
 return 0;
 }

--
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: fix NULL err for sriov detect

2017-12-13 Thread Deucher, Alexander
Reviewed-by: Alex Deucher 



From: amd-gfx  on behalf of Chunming 
Zhou 
Sent: Wednesday, December 13, 2017 4:03 AM
To: amd-gfx@lists.freedesktop.org
Cc: Zhou, David(ChunMing); Liu, Monk
Subject: [PATCH] drm/amdgpu: fix NULL err for sriov detect

[   21.841536] BUG: KASAN: null-ptr-deref in soc15_set_ip_blocks+0x4f/0x2e0 
[amdgpu]

Change-Id: I182dfed95c362123a75feafe44fa2ad3f3f35cac
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a16e8d9a8fa2..49ff552cd6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -521,6 +521,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 return -EINVAL;
 }

+   if (adev->flags & AMD_IS_APU)
+   adev->nbio_funcs = &nbio_v7_0_funcs;
+   else
+   adev->nbio_funcs = &nbio_v6_1_funcs;
+
 adev->nbio_funcs->detect_hw_virt(adev);

 if (amdgpu_sriov_vf(adev))
@@ -611,11 +616,6 @@ static int soc15_common_early_init(void *handle)

 adev->asic_funcs = &soc15_asic_funcs;

-   if (adev->flags & AMD_IS_APU)
-   adev->nbio_funcs = &nbio_v7_0_funcs;
-   else
-   adev->nbio_funcs = &nbio_v6_1_funcs;
-
 if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
 (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
 psp_enabled = true;
--
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/2] drm/amdgpu:impl virt_gart_flush_tlbs

2017-12-13 Thread Alex Deucher
On Tue, Dec 12, 2017 at 10:42 PM, Monk Liu  wrote:
> a new gart flush tlb function implemented for SRIOV,
> and invoke it during RUNTIME for gart flush TLBs
>
> this could avoid the issue that gart flush (via CPU MMIO)
> being interrupted by word switch which lead to DMAR error
> on Host/IOMMU side, with this function the gart flush
> tlbs always run on KIQ with single PM4 package so it won't
> get interrupted before the flushing finished.
>
> Change-Id: I0849658d7945c3874b3cc0d9369a50e1aedb8312
> Signed-off-by: Monk Liu 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 27 +++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h |  1 +
>  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c|  3 +++
>  3 files changed, 31 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index e7dfb7b..7a6ef64 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -172,6 +172,33 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, 
> uint32_t reg, uint32_t v)
> DRM_ERROR("wait for kiq fence error: %ld\n", r);
>  }
>
> +int amdgpu_virt_gart_flush_tlbs(struct amdgpu_device *adev)
> +{
> +   struct amdgpu_kiq *kiq = &adev->gfx.kiq;
> +   struct amdgpu_ring *ring = &kiq->ring;
> +   unsigned long flags;
> +   signed long r;
> +   uint32_t seq;
> +
> +   if(!ring->funcs->emit_invalidate_tlbs)
> +   return -ENOENT;
> +
> +   spin_lock_irqsave(&kiq->ring_lock, flags);
> +   amdgpu_ring_alloc(ring, 16);
> +   amdgpu_ring_emit_invalidate_tlbs(ring);
> +   amdgpu_fence_emit_polling(ring, &seq);
> +   amdgpu_ring_commit(ring);
> +   spin_unlock_irqrestore(&kiq->ring_lock, flags);
> +
> +   r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
> +   if (r < 1) {
> +   DRM_ERROR("wait for kiq invalidate tlbs error: %ld\n", r);
> +   return -ETIME;
> +   }
> +
> +   return 0;
> +}
> +
>  /**
>   * amdgpu_virt_request_full_gpu() - request full gpu access
>   * @amdgpu:amdgpu device.
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 6a83425..935fed3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -297,5 +297,6 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, 
> unsigned long obj_size,
> unsigned int key,
> unsigned int chksum);
>  void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
> +int amdgpu_virt_gart_flush_tlbs(struct amdgpu_device *adev);
>
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
> b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> index 1b5dfcc..a195039 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> @@ -332,6 +332,9 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct 
> amdgpu_device *adev,
> /* flush hdp cache */
> adev->nbio_funcs->hdp_flush(adev);
>
> +   if (amdgpu_sriov_runtime(adev) && !amdgpu_virt_gart_flush_tlbs(adev))
> +   return;


Do we need a fw version check for the flush_tlb packet?

Alex

> +
> spin_lock(&adev->mc.invalidate_lock);
>
> for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: Fix no irq process when evict vram

2017-12-13 Thread Mike Lothian
I'm hoping this fixes the slow shutdown time of my laptop, which seems to
switch the card back on right before switching off

It's been an issue since the introduction of KIQ (around the same time)

On Wed, 13 Dec 2017 at 12:07 Tao, Yintian  wrote:

> Hi  Lothian
>
>
>
>
>
> First of all, thanks for your review.
>
>
>
> No, it is the patch which achieve the same function for the issue. But it
> is the root cause of fence timeout.
>
> The patch b9141cd3
> 
>  is
the workaround for the issue. And I think the variable “shutdown”
> assignment is better to be located after amdgpu_fini() to ensure no irq
> miss.
>
>
>
> Best Regards
>
> Yintian Tao
>
>
>
>
>
> *From:* Mike Lothian [mailto:m...@fireburn.co.uk]
> *Sent:* Wednesday, December 13, 2017 7:23 PM
> *To:* Tao, Yintian 
> *Cc:* amd-gfx@lists.freedesktop.org
> *Subject:* Re: [PATCH] drm/amdgpu: Fix no irq process when evict vram
>
>
>
> Is this a follow on to
> https://cgit.freedesktop.org/~agd5f/linux/commit/?h=drm-next-4.16-wip&id=b9141cd3930e390f156739829ca9589fda7926e4
>
>
>
>
> On Wed, 13 Dec 2017 at 07:11 Yintian Tao  wrote:
>
> When unload amdgpu driver we use sdma to evict vram but there is no
> irq process after sdma completed work which raises that waiting for the
> fence costs 2s which will trigger VFLR under SRIOV and at last make
> unload driver failed. The reason is that the shutdown variable in adev
> is set to true before evicting vram, which causes the ISR to return directly
> without processing. Therefore, we need to set the variable after evicting vram.
>
> Change-Id: I7bf75481aa0744b99c41672b49670adc70b478bd
> Signed-off-by: Yintian Tao 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index a269bbc..80934ee 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2458,7 +2458,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
> int r;
>
> DRM_INFO("amdgpu: finishing device.\n");
> -   adev->shutdown = true;
> if (adev->mode_info.mode_config_initialized)
> drm_crtc_force_disable_all(adev->ddev);
>
> @@ -2466,6 +2465,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
> amdgpu_fence_driver_fini(adev);
> amdgpu_fbdev_fini(adev);
> r = amdgpu_fini(adev);
> +   adev->shutdown = true;
> if (adev->firmware.gpu_info_fw) {
> release_firmware(adev->firmware.gpu_info_fw);
> adev->firmware.gpu_info_fw = NULL;
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
>
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: Add gpu_recovery parameter

2017-12-13 Thread Andrey Grodzovsky



On 12/13/2017 07:20 AM, Christian König wrote:

Am 12.12.2017 um 20:16 schrieb Andrey Grodzovsky:

Add new parameter to control GPU recovery procedure.
Retire old way of disabling GPU recovery by setting lockup_timeout == 
0 and

set default for lockup_timeout to 10s.

Signed-off-by: Andrey Grodzovsky 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h    | 1 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c    | 8 ++--
  3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index 3735500..26abe03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -126,6 +126,7 @@ extern int amdgpu_param_buf_per_se;
  extern int amdgpu_job_hang_limit;
  extern int amdgpu_lbpw;
  extern int amdgpu_compute_multipipe;
+extern int amdgpu_gpu_recovery;
    #ifdef CONFIG_DRM_AMDGPU_SI
  extern int amdgpu_si_support;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 8d03baa..d84b57a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3030,6 +3030,11 @@ int amdgpu_gpu_recover(struct amdgpu_device 
*adev, struct amdgpu_job *job)

  return 0;
  }
  +    if (!amdgpu_gpu_recovery) {
+    DRM_INFO("GPU recovery disabled.\n");
+    return 0;
+    }
+


Please move this check into the caller of amdgpu_gpu_recover().

This way we can still trigger a GPU recovery manually or from the 
hypervisor under SRIOV.


Christian.


Problem with this is that amdgpu_check_soft_reset will not be called, 
this function which prints which IP block was hung even when later we 
opt not to recover.
I suggest instead to add a bool force_reset parameter to 
amdgpu_gpu_recover which will override amdgpu_gpu_recovery and we can 
set it to true from amdgpu_debugfs_gpu_recover only.


Thanks,
Andrey




  dev_info(adev->dev, "GPU reset begin!\n");
    mutex_lock(&adev->lock_reset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

index 0b039bd..5c612e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;
  int amdgpu_hw_i2c = 0;
  int amdgpu_pcie_gen2 = -1;
  int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 10000;
  int amdgpu_dpm = -1;
  int amdgpu_fw_load_type = -1;
  int amdgpu_aspm = -1;
@@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0;
  int amdgpu_job_hang_limit = 0;
  int amdgpu_lbpw = -1;
  int amdgpu_compute_multipipe = -1;
+int amdgpu_gpu_recovery = 1;
    MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in 
megabytes");

  module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -165,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, 
int, 0444);
  MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = 
auto)");

  module_param_named(msi, amdgpu_msi, int, 0444);
  -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms 
(default 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000)");

  module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
    MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = 
auto)");

@@ -280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
  MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be 
spread across pipes (1 = enable, 0 = disable, -1 = auto)");
  module_param_named(compute_multipipe, amdgpu_compute_multipipe, 
int, 0444);
  +MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 
= enable (default) , 0 = disable");

+module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
  #ifdef CONFIG_DRM_AMDGPU_SI
    #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/2] drm/amdgpu:impl virt_gart_flush_tlbs

2017-12-13 Thread Christian König

Am 13.12.2017 um 04:42 schrieb Monk Liu:

a new gart flush tlb function implemented for SRIOV,
and invoke it during RUNTIME for gart flush TLBs

this could avoid the issue that gart flush (via CPU MMIO)
being interrupted by world switch which leads to DMAR error
on Host/IOMMU side, with this function the gart flush
tlbs always run on KIQ with single PM4 package so it won't
get interrupted before the flushing finished.

Change-Id: I0849658d7945c3874b3cc0d9369a50e1aedb8312
Signed-off-by: Monk Liu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 27 +++
  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h |  1 +
  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c|  3 +++
  3 files changed, 31 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e7dfb7b..7a6ef64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -172,6 +172,33 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, 
uint32_t reg, uint32_t v)
DRM_ERROR("wait for kiq fence error: %ld\n", r);
  }
  
+int amdgpu_virt_gart_flush_tlbs(struct amdgpu_device *adev)

+{
+   struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+   struct amdgpu_ring *ring = &kiq->ring;
+   unsigned long flags;
+   signed long r;
+   uint32_t seq;
+
+   if(!ring->funcs->emit_invalidate_tlbs)
+   return -ENOENT;
+
+   spin_lock_irqsave(&kiq->ring_lock, flags);
+   amdgpu_ring_alloc(ring, 16);
+   amdgpu_ring_emit_invalidate_tlbs(ring);
+   amdgpu_fence_emit_polling(ring, &seq);
+   amdgpu_ring_commit(ring);
+   spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+   r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+   if (r < 1) {
+   DRM_ERROR("wait for kiq invalidate tlbs error: %ld\n", r);
+   return -ETIME;
+   }
+
+   return 0;
+}
+
  /**
   * amdgpu_virt_request_full_gpu() - request full gpu access
   * @amdgpu:   amdgpu device.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 6a83425..935fed3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -297,5 +297,6 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned 
long obj_size,
unsigned int key,
unsigned int chksum);
  void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+int amdgpu_virt_gart_flush_tlbs(struct amdgpu_device *adev);
  
  #endif

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 1b5dfcc..a195039 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -332,6 +332,9 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct 
amdgpu_device *adev,
/* flush hdp cache */
adev->nbio_funcs->hdp_flush(adev);
  
+	if (amdgpu_sriov_runtime(adev) && !amdgpu_virt_gart_flush_tlbs(adev))

+   return;
+


Better open code that like this:

if (amdgpu_sriov_runtime(adev)) {
    /* Try using the KIQ */
    r = amdgpu_virt_gart_flush_tlbs(adev));
    if (!r)
        return;
}

Apart from that it looks good to me.

Christian.


spin_lock(&adev->mc.invalidate_lock);
  
  	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/2] drm/amdgpu:implement invalid tlbs with kiq

2017-12-13 Thread Christian König

Am 13.12.2017 um 04:42 schrieb Monk Liu:

Implement gart flush gpu tlbs with INVALIDATE_TLBS
package on gfx9/gmc9

Change-Id: I851fb93db17e04d19959768c01ba6c677cbb777c
Signed-off-by: Monk Liu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h  | 1 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 +
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c| 7 +++
  drivers/gpu/drm/amd/amdgpu/soc15d.h  | 6 +-
  4 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0cb2235..b3292cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1885,6 +1885,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
  #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
  #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
  #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+#define amdgpu_ring_emit_invalidate_tlbs(r) 
(r)->funcs->emit_invalidate_tlbs((r))
  #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
  #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
  #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 010f690..6ad314e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -152,6 +152,7 @@ struct amdgpu_ring_funcs {
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+   void (*emit_invalidate_tlbs)(struct amdgpu_ring *ring);


At some point we should probably superset amdgpu_ring_funcs with and 
amdgpu_kiq_funcs structure.


But that can come in a later patch as well.


/* priority functions */
void (*set_priority) (struct amdgpu_ring *ring,
  enum drm_sched_priority priority);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e9a668b..1a48a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3905,6 +3905,12 @@ static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring 
*ring, uint32_t reg,
amdgpu_ring_write(ring, val);
  }
  
+static void gfx_v9_ring_emit_invalidate_tlbs(struct amdgpu_ring *ring) {

+   amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+   amdgpu_ring_write(ring, PACKET3_INVALIDATE_TLBS_DST_SEL(0) |
+   
PACKET3_INVALIDATE_TLBS_ALL_HUB(1));


That is once more way to far indented to the right.

With that fixed the patch is Reviewed-by: Christian König 



Christian.


+}
+
  static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 enum amdgpu_interrupt_state 
state)
  {
@@ -4280,6 +4286,7 @@ static const struct amdgpu_ring_funcs 
gfx_v9_0_ring_funcs_kiq = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v9_0_ring_emit_rreg,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
+   .emit_invalidate_tlbs = gfx_v9_ring_emit_invalidate_tlbs,
  };
  
  static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h 
b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index 7f408f8..f0d0b91 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -267,7 +267,11 @@
 * x=0: tmz_begin
 * x=1: tmz_end
 */
-
-
+#define PACKET3_INVALIDATE_TLBS 0x98
+#  define PACKET3_INVALIDATE_TLBS_DST_SEL(x) ((x) << 0)
+#  define PACKET3_INVALIDATE_TLBS_ALL_HUB(x) ((x) << 4)
+#  define PACKET3_INVALIDATE_TLBS_PASID(x)   ((x) << 5)
+#  define PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(x)  ((x) << 29)
  #define PACKET3_SET_RESOURCES 0xA0
  /* 1. header
   * 2. CONTROL


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/3] drm/amdgpu: drop scratch regs save and restore from S3/S4 handling

2017-12-13 Thread Christian König

Am 13.12.2017 um 00:52 schrieb Harry Wentland:

On 2017-12-12 03:27 PM, Alex Deucher wrote:

The expectation is that the base driver doesn't mess with these.
Some components interact with these directly so let the components
handle these directly.

Signed-off-by: Alex Deucher 

Series is Reviewed-by: Harry Wentland 


Acked-by: Christian König 

Christian.



Harry


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 --
  1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 98d62a991b67..ca1cf8a71dda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2577,7 +2577,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool 
suspend, bool fbcon)
 */
amdgpu_bo_evict_vram(adev);
  
-	amdgpu_atombios_scratch_regs_save(adev);

pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
@@ -2626,7 +2625,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool 
resume, bool fbcon)
if (r)
goto unlock;
}
-   amdgpu_atombios_scratch_regs_restore(adev);
  
  	/* post card */

if (amdgpu_need_post(adev)) {


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: remove some old gc 9.x registers

2017-12-13 Thread Christian König

Am 12.12.2017 um 21:11 schrieb Alex Deucher:

Leftover from bring up.

Signed-off-by: Alex Deucher 


Acked-by: Christian König 


---
  drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h   |  8 ++--
  .../drm/amd/include/asic_reg/gc/gc_9_0_default.h   |  7 
  .../drm/amd/include/asic_reg/gc/gc_9_0_offset.h| 14 ---
  .../drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h   | 45 --
  .../drm/amd/include/asic_reg/gc/gc_9_1_offset.h| 14 ---
  5 files changed, 4 insertions(+), 84 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h 
b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
index 003a131bad47..567a904804bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
@@ -48,7 +48,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
  0x, // DB_STENCIL_WRITE_BASE
  0x, // DB_STENCIL_WRITE_BASE_HI
  0x, // DB_DFSM_CONTROL
-0x, // DB_RENDER_FILTER
+0, // HOLE
  0x, // DB_Z_INFO2
  0x, // DB_STENCIL_INFO2
  0, // HOLE
@@ -259,8 +259,8 @@ static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
  0x, // PA_SC_RIGHT_VERT_GRID
  0x, // PA_SC_LEFT_VERT_GRID
  0x, // PA_SC_HORIZ_GRID
-0x, // PA_SC_FOV_WINDOW_LR
-0x, // PA_SC_FOV_WINDOW_TB
+0, // HOLE
+0, // HOLE
  0, // HOLE
  0, // HOLE
  0, // HOLE
@@ -701,7 +701,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
  {
  0x, // VGT_GS_MAX_PRIMS_PER_SUBGROUP
  0x, // VGT_DRAW_PAYLOAD_CNTL
-0x, // VGT_INDEX_PAYLOAD_CNTL
+0, // HOLE
  0x, // VGT_INSTANCE_STEP_RATE_0
  0x, // VGT_INSTANCE_STEP_RATE_1
  0, // HOLE
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h 
b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
index 663d3af35baf..5bf84c6d0ec3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
@@ -436,7 +436,6 @@
  #define mmTA_CNTL_DEFAULT 
   0x8004d850
  #define mmTA_CNTL_AUX_DEFAULT 
   0x
  #define mmTA_RESERVED_010C_DEFAULT
   0x
-#define mmTA_GRAD_ADJ_DEFAULT  
  0x4040
  #define mmTA_STATUS_DEFAULT   
   0x
  #define mmTA_SCRATCH_DEFAULT  
   0x
  
@@ -1700,7 +1699,6 @@

  #define mmDB_STENCIL_WRITE_BASE_DEFAULT   
   0x
  #define mmDB_STENCIL_WRITE_BASE_HI_DEFAULT
   0x
  #define mmDB_DFSM_CONTROL_DEFAULT 
   0x
-#define mmDB_RENDER_FILTER_DEFAULT 
  0x
  #define mmDB_Z_INFO2_DEFAULT  
   0x
  #define mmDB_STENCIL_INFO2_DEFAULT
   0x
  #define mmTA_BC_BASE_ADDR_DEFAULT 
   0x
@@ -1806,8 +1804,6 @@
  #define mmPA_SC_RIGHT_VERT_GRID_DEFAULT   
   0x
  #define mmPA_SC_LEFT_VERT_GRID_DEFAULT
   0x
  #define mmPA_SC_HORIZ_GRID_DEFAULT
   0x
-#define mmPA_SC_FOV_WINDOW_LR_DEFAULT  
  0x
-#define mmPA_SC_FOV_WINDOW_TB_DEFAULT  
  0x
  #define mmVGT_MULTI_PRIM_IB_RESET_INDX_DEFAULT
   0x
  #define mmCB_BLEND_RED_DEFAULT
   0x
  #define mmCB_BLEND_GREEN_DEFAULT  
   0x
@@ -2072,7 +2068,6 @@
  #define mmVGT_EVENT_INITIATOR_DEFAULT 
   0x
  #define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_DEFAULT   
   0x
  #define mmVGT_DRAW_PAYLOAD_CNTL_DEFAULT   
   0x
-#define mmVGT_INDEX_PAYLOAD_CNTL_DEFAULT   
  0x
  #define mmVGT_INSTANCE_STEP_RATE_0_DEFAULT
   0x
  #define mmVGT_INSTANCE_STEP_RATE_1_DEFAULT
   0x
  #define mmVGT_ESGS_RING_ITEMSIZE_DEFAULT  
   0x
@@ -2490,7 +2485,6 @@
  #define mmWD_INDEX_BUF_BASE_DEFAULT   
   0x
  #define mmWD_INDEX_BUF_BA

Re: [PATCH] drm/amdgpu: Add gpu_recovery parameter

2017-12-13 Thread Christian König

Am 12.12.2017 um 20:16 schrieb Andrey Grodzovsky:

Add new parameter to control GPU recovery procedure.
Retire old way of disabling GPU recovery by setting lockup_timeout == 0 and
set default for lockup_timeout to 10s.

Signed-off-by: Andrey Grodzovsky 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h| 1 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c| 8 ++--
  3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3735500..26abe03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -126,6 +126,7 @@ extern int amdgpu_param_buf_per_se;
  extern int amdgpu_job_hang_limit;
  extern int amdgpu_lbpw;
  extern int amdgpu_compute_multipipe;
+extern int amdgpu_gpu_recovery;
  
  #ifdef CONFIG_DRM_AMDGPU_SI

  extern int amdgpu_si_support;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8d03baa..d84b57a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3030,6 +3030,11 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, 
struct amdgpu_job *job)
return 0;
}
  
+	if (!amdgpu_gpu_recovery) {

+   DRM_INFO("GPU recovery disabled.\n");
+   return 0;
+   }
+


Please move this check into the caller of amdgpu_gpu_recover().

This way we can still trigger a GPU recovery manually or from the 
hypervisor under SRIOV.


Christian.


dev_info(adev->dev, "GPU reset begin!\n");
  
  	mutex_lock(&adev->lock_reset);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0b039bd..5c612e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;
  int amdgpu_hw_i2c = 0;
  int amdgpu_pcie_gen2 = -1;
  int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 1;
  int amdgpu_dpm = -1;
  int amdgpu_fw_load_type = -1;
  int amdgpu_aspm = -1;
@@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0;
  int amdgpu_job_hang_limit = 0;
  int amdgpu_lbpw = -1;
  int amdgpu_compute_multipipe = -1;
+int amdgpu_gpu_recovery = 1;
  
  MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");

  module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -165,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
  MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
  module_param_named(msi, amdgpu_msi, int, 0444);
  
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");

+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 1)");
  module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
  
  MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");

@@ -280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
  MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across 
pipes (1 = enable, 0 = disable, -1 = auto)");
  module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
  
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable (default) , 0 = disable");

+module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
  #ifdef CONFIG_DRM_AMDGPU_SI
  
  #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: Fix no irq process when evict vram

2017-12-13 Thread Tao, Yintian
Hi  Lothian


First of all, thanks for your review.

No, it is the patch which achieves the same function for the issue. But it is 
the root cause of the fence timeout.
The patch 
b9141cd3
 is the workaround for the issue. And I think the variable “shutdown” 
assignment is better located after amdgpu_fini() to ensure no irq is missed.

Best Regards
Yintian Tao


From: Mike Lothian [mailto:m...@fireburn.co.uk]
Sent: Wednesday, December 13, 2017 7:23 PM
To: Tao, Yintian 
Cc: amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: Fix no irq process when evict vram

Is this a follow on to 
https://cgit.freedesktop.org/~agd5f/linux/commit/?h=drm-next-4.16-wip&id=b9141cd3930e390f156739829ca9589fda7926e4

On Wed, 13 Dec 2017 at 07:11 Yintian Tao mailto:yt...@amd.com>> 
wrote:
When unloading the amdgpu driver we use SDMA to evict VRAM, but there is no
irq processing after SDMA completes its work, so waiting for the
fence costs 2s, which will trigger VFLR under SRIOV and at last make
driver unload fail. The reason is that the shutdown variable in adev
is set to true before evicting VRAM, which causes the ISR to return directly
without processing. Therefore, we need to set the variable after evicting VRAM.

Change-Id: I7bf75481aa0744b99c41672b49670adc70b478bd
Signed-off-by: Yintian Tao mailto:yt...@amd.com>>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a269bbc..80934ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2458,7 +2458,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
int r;

DRM_INFO("amdgpu: finishing device.\n");
-   adev->shutdown = true;
if (adev->mode_info.mode_config_initialized)
drm_crtc_force_disable_all(adev->ddev);

@@ -2466,6 +2465,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
+   adev->shutdown = true;
if (adev->firmware.gpu_info_fw) {
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: Fix no irq process when evict vram

2017-12-13 Thread Mike Lothian
Is this a follow on to
https://cgit.freedesktop.org/~agd5f/linux/commit/?h=drm-next-4.16-wip&id=b9141cd3930e390f156739829ca9589fda7926e4


On Wed, 13 Dec 2017 at 07:11 Yintian Tao  wrote:

> When unloading the amdgpu driver we use SDMA to evict VRAM, but there is no
> irq processing after SDMA completes its work, so waiting for the
> fence costs 2s, which will trigger VFLR under SRIOV and at last make
> driver unload fail. The reason is that the shutdown variable in adev
> is set to true before evicting VRAM, which causes the ISR to return directly
> without processing. Therefore, we need to set the variable after evicting VRAM.
>
> Change-Id: I7bf75481aa0744b99c41672b49670adc70b478bd
> Signed-off-by: Yintian Tao 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index a269bbc..80934ee 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2458,7 +2458,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
> int r;
>
> DRM_INFO("amdgpu: finishing device.\n");
> -   adev->shutdown = true;
> if (adev->mode_info.mode_config_initialized)
> drm_crtc_force_disable_all(adev->ddev);
>
> @@ -2466,6 +2465,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
> amdgpu_fence_driver_fini(adev);
> amdgpu_fbdev_fini(adev);
> r = amdgpu_fini(adev);
> +   adev->shutdown = true;
> if (adev->firmware.gpu_info_fw) {
> release_firmware(adev->firmware.gpu_info_fw);
> adev->firmware.gpu_info_fw = NULL;
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/pp: need to notify umd the pstate clock.

2017-12-13 Thread Rex Zhu
Change-Id: I344731cc6398c40976e08a125808bbfa85cb59a3
Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   | 43 +-
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 36 ++
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  |  2 +
 3 files changed, 56 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 8edb0c4..ecf9449 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2590,8 +2590,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr 
*hwmgr, enum amd_dpm_forced_le
break;
}
}
-   if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+   if (count < 0 || level == 
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
*sclk_mask = 0;
+   tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+   }
 
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
@@ -2603,6 +2605,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr 
*hwmgr, enum amd_dpm_forced_le
*mclk_mask = golden_dpm_table->mclk_table.count - 1;
 
*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
+
+   hwmgr->pstate_sclk = tmp_sclk;
+   hwmgr->pstate_mclk = tmp_mclk;
+
return 0;
 }
 
@@ -2614,6 +2620,10 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
 
+   ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, 
&pcie_mask);
+   if (ret)
+   return ret;
+
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
@@ -2628,9 +2638,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-   ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, 
&mclk_mask, &pcie_mask);
-   if (ret)
-   return ret;
smu7_force_clock_level(hwmgr, PP_SCLK, 1dpm_table.mclk_table);
struct smu7_single_dpm_table *pcie_table = 
&(data->dpm_table.pcie_speed_table);
-   int i, now, size = 0;
+   int i, now, p, size = 0;
uint32_t clock, pcie_speed;
 
switch (type) {
@@ -4301,32 +4308,34 @@ static int smu7_print_clock_levels(struct pp_hwmgr 
*hwmgr,
clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
 
for (i = 0; i < sclk_table->count; i++) {
-   if (clock > sclk_table->dpm_levels[i].value)
-   continue;
-   break;
+   if (hwmgr->pstate_sclk == 
sclk_table->dpm_levels[i].value)
+   p = i;
+   if (clock <= sclk_table->dpm_levels[i].value)
+   now = i;
}
-   now = i;
 
for (i = 0; i < sclk_table->count; i++)
-   size += sprintf(buf + size, "%d: %uMhz %s\n",
+   size += sprintf(buf + size, "%d: %uMhz %s %s\n",
i, sclk_table->dpm_levels[i].value / 
100,
-   (i == now) ? "*" : "");
+   (i == now) ? "*" : "",
+   (i == p) ? "P" : "");
break;
case PP_MCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
 
for (i = 0; i < mclk_table->count; i++) {
-   if (clock > mclk_table->dpm_levels[i].value)
-   continue;
-   break;
+   if (hwmgr->pstate_mclk == 
mclk_table->dpm_levels[i].value)
+   p = i;
+   if (clock <= mclk_table->dpm_levels[i].value)
+   now = i;
}
-   now = i;
 
for (i = 0; i < mclk_table->count; i++)
-   size += sprintf(buf + size, "%d: %uMhz %s\n",
+   size += sprintf(buf + size, "%d: %uMhz %s %s\n",
i, mclk_table->dpm_levels[i].value / 
100,
-   (i == now) ? "*" : "");
+   (i == now) ? "*" : "",
+   (i == p) ? "P"

[PATCH] drm/amd/pp: reset dpm level when adjust power state

2017-12-13 Thread Rex Zhu
Change-Id: I312d1cf7b964d25d698b02800ce7cd06ac33b28f
Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index ffa44bb..ab852b2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -244,7 +244,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, 
bool skip,
}
 
phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
-
+   phm_force_dpm_levels(hwmgr, hwmgr->dpm_level);
return 0;
 }
 
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/3] drm/amd/amdgpu: init allow_reserved_eviction and resv when create a new bo

2017-12-13 Thread Michel Dänzer
On 2017-12-13 06:17 AM, Roger He wrote:
> Change-Id: I0c6571c2a64e6c5bdad80ccbcccb40eba1c20b4e
> Signed-off-by: Roger He 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index dc0a8be..7c7f56f5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -327,7 +327,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
>  uint64_t init_value,
>  struct amdgpu_bo **bo_ptr)
>  {
> - struct ttm_operation_ctx ctx = { !kernel, false };
> + struct ttm_operation_ctx ctx = { !kernel, false, true, resv };

Please use named initializers, to make it easier to understand which
field is initialized to which value.


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/3] drm/amd/amdgpu: init allow_reserved_eviction and resv when create a new bo

2017-12-13 Thread Christian König

Am 13.12.2017 um 06:17 schrieb Roger He:

Change-Id: I0c6571c2a64e6c5bdad80ccbcccb40eba1c20b4e
Signed-off-by: Roger He 


We should supply the resv object in amdgpu_cs_bo_validate() as well, or 
otherwise the deleted object handling won't work as desired any more.


Apart from that looks good to me.

Christian.


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index dc0a8be..7c7f56f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -327,7 +327,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
   uint64_t init_value,
   struct amdgpu_bo **bo_ptr)
  {
-   struct ttm_operation_ctx ctx = { !kernel, false };
+   struct ttm_operation_ctx ctx = { !kernel, false, true, resv };
struct amdgpu_bo *bo;
enum ttm_bo_type type;
unsigned long page_align;


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 29/37] PCI: Add pci_enable_atomic_ops_to_root

2017-12-13 Thread Bjorn Helgaas
[+cc Ram, Michal, Ariel, Doug, Jason]

The [29/37] in the subject makes it look like this is part of a larger
series, but I can't find the rest of it on linux-pci or linux-kernel.

I don't want to merge a new interface unless there's an in-tree user
of it.  I assume the rest of the series includes a user.

On Fri, Dec 08, 2017 at 11:09:07PM -0500, Felix Kuehling wrote:
> From: Jay Cornwall 
> 
> The PCIe 3.0 AtomicOp (6.15) feature allows atomic transctions to be
> requested by, routed through and completed by PCIe components. Routing and
> completion do not require software support. Component support for each is
> detectable via the DEVCAP2 register.
> 
> AtomicOp requests are permitted only if a component's
> DEVCTL2.ATOMICOP_REQUESTER_ENABLE field is set. This capability cannot be
> detected but is a no-op if set on a component with no support. These
> requests can only be serviced if the upstream components support AtomicOp
> completion and/or routing to a component which does.
> 
> A concrete example is the AMD Fiji-class GPU, which is specified to
> support AtomicOp requests, routed through a PLX 8747 switch (advertising
> AtomicOp routing) to a Haswell host bridge (advertising AtomicOp
> completion support). When AtomicOp requests are disabled the GPU logs
> attempts to initiate requests to an MMIO register for debugging.
> 
> Add pci_enable_atomic_ops_to_root for per-device control over AtomicOp
> requests. Upstream bridges are checked for AtomicOp routing capability and
> the call fails if any lack this capability. The root port is checked for
> AtomicOp completion capabilities and the call fails if it does not support
> any. Routes to other PCIe components are not checked for AtomicOp routing
> and completion capabilities.
> 
> v2: Check for AtomicOp route to root port with AtomicOp completion
> v3: Style fixes
> v4: Endpoint to root port only, check upstream egress blocking
> v5: Rebase, use existing PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK define
> 
> CC: linux-...@vger.kernel.org
> Signed-off-by: Jay Cornwall 
> Signed-off-by: Felix Kuehling 
> ---
>  drivers/pci/pci.c | 81 
> +++
>  include/linux/pci.h   |  1 +
>  include/uapi/linux/pci_regs.h |  2 ++
>  3 files changed, 84 insertions(+)
> 
> diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
> index 6078dfc..89a8bb0 100644
> --- a/drivers/pci/pci.c
> +++ b/drivers/pci/pci.c
> @@ -2966,6 +2966,87 @@ bool pci_acs_path_enabled(struct pci_dev *start,
>  }
>  
>  /**
> + * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
> + * @dev: the PCI device
> + *
> + * Return 0 if the device is capable of generating AtomicOp requests,

I don't believe this part.

You return 0 if the upstream path can route AtomicOps and the Root
Port can complete them.  But there's nothing here that's conditional
on capabilities of *dev*.

You could read back PCI_EXP_DEVCTL2 to see if
PCI_EXP_DEVCTL2_ATOMIC_REQ was writable, but even then, you can't
really tell what the device is capable of.

> + * all upstream bridges support AtomicOp routing, egress blocking is disabled
> + * on all upstream ports, and the root port supports 32-bit, 64-bit and/or
> + * 128-bit AtomicOp completion, or negative otherwise.
> + */
> +int pci_enable_atomic_ops_to_root(struct pci_dev *dev)
> +{
> + struct pci_bus *bus = dev->bus;
> +
> + if (!pci_is_pcie(dev))
> + return -EINVAL;
> +
> + switch (pci_pcie_type(dev)) {
> + /*
> +  * PCIe 3.0, 6.15 specifies that endpoints and root ports are permitted
> +  * to implement AtomicOp requester capabilities.
> +  */
> + case PCI_EXP_TYPE_ENDPOINT:
> + case PCI_EXP_TYPE_LEG_END:
> + case PCI_EXP_TYPE_RC_END:
> + break;
> + default:
> + return -EINVAL;
> + }
> +
> + while (bus->parent) {
> + struct pci_dev *bridge = bus->self;
> + u32 cap;
> +
> + pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
> +
> + switch (pci_pcie_type(bridge)) {
> + /*
> +  * Upstream, downstream and root ports may implement AtomicOp
> +  * routing capabilities. AtomicOp routing via a root port is
> +  * not considered.
> +  */
> + case PCI_EXP_TYPE_UPSTREAM:
> + case PCI_EXP_TYPE_DOWNSTREAM:
> + if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
> + return -EINVAL;
> + break;
> +
> + /*
> +  * Root ports are permitted to implement AtomicOp completion
> +  * capabilities.
> +  */
> + case PCI_EXP_TYPE_ROOT_PORT:
> + if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
> +  PCI_EXP_DEVCAP2_ATOMIC_COMP64 |
> +  PCI_EXP_DEVCAP2_ATOMIC_COMP128)))
> + return -EINVA

Re: [PATCH 29/37] PCI: Add pci_enable_atomic_ops_to_root

2017-12-13 Thread Jason Gunthorpe
On Tue, Dec 12, 2017 at 05:27:07PM -0600, Bjorn Helgaas wrote:
> [+cc Ram, Michal, Ariel, Doug, Jason]
> 
> The [29/37] in the subject makes it look like this is part of a larger
> series, but I can't find the rest of it on linux-pci or linux-kernel.

Didn't find the cover letter, but the AMD patchworks captured the series..

https://patchwork.freedesktop.org/project/amd-xorg-ddx/patches/

> I don't want to merge a new interface unless there's an in-tree user
> of it.  I assume the rest of the series includes a user.

Looks like it.

I would also guess we will see users in drivers/infiniband emerge as
CPU coherent atomics are also a topic our hardware drivers will be
interested in. But I am not aware of any pending patches.

Jason
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: AMD WX7100 screen display problem on AArch64 architecture server.

2017-12-13 Thread Michel Dänzer
On 2017-12-13 08:14 AM, Lvzhihong (ReJohn) wrote:
> Hi,
> 
>    We met a problem on Ubuntu 17.10 on an ARM server with amdgpu (AMD
> RADEON PRO WX7100); we use the open source driver which is integrated in
> Ubuntu 17.10. And the architecture is AArch64-linux-gnu.
> 
>  we install :
> 
>  apt-get install xserver-xorg xinit xfce4 and mesa-utils glmark2
> 
>  we start x server :
> 
>   startx
> 
>  and then the monitor shows the screen and the screen is
> blurred( something wrong).
> 
> cid:image001.jpg@01D37422.3B3670D0
> 
>  And I have tried some opengl applications, the output has same
> problem.(something is missing or  in the wrong place.)
> 
>  
> 
>  But in a x86_64 architecture server, with same OS. The screen
> output is normal. (I check xorg\DDX\mesa\libdrm etc.all the versions are
> the same with aarch64 server.)
> 
> What I have done:
> 
>  1、I upgrade kernel to 4.15-rc2 ,upgrade DRM to 3.23,upgrade
> DDX to 1.40,upgrade mesa to 17.2.6, but the problem still exist.
> 
>  2、I enable ‘shadowprimary’ option,*the screen output became
> normal*, but the*performance drop quickly*——glxgears drop from 4800fps
> to 600fps, glmark drop from 4300 score to 730 score.

With ShadowPrimary enabled, all normal X11 drawing is performed by the
CPU, not the GPU.

Does enabling ShadowPrimary also make OpenGL applications look correct?
If not, it's most likely a Mesa or maybe LLVM issue.


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/3] drm/ttm: add allow_reserved_eviction and resv into ttm_operation_ctx

2017-12-13 Thread Christian König

Am 13.12.2017 um 06:17 schrieb Roger He:

allow_reserved_eviction: Allow eviction of reserved BOs
resv: Reservation object to allow reserved evictions with

Change-Id: I01ea482e8c7470014196eb218e2ff8913306eef0
Signed-off-by: Roger He 


Reviewed-by: Christian König 


---
  include/drm/ttm/ttm_bo_api.h | 4 
  1 file changed, 4 insertions(+)

diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 368eb02..c126330 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -263,6 +263,8 @@ struct ttm_bo_kmap_obj {
   *
   * @interruptible: Sleep interruptible if sleeping.
   * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @allow_reserved_eviction: Allow eviction of reserved BOs.
+ * @resv: Reservation object to allow reserved evictions with.
   *
   * Context for TTM operations like changing buffer placement or general memory
   * allocation.
@@ -270,6 +272,8 @@ struct ttm_bo_kmap_obj {
  struct ttm_operation_ctx {
bool interruptible;
bool no_wait_gpu;
+   bool allow_reserved_eviction;
+   struct reservation_object *resv;
uint64_t bytes_moved;
  };
  


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 3/3] drm/ttm: enable eviction for Per-VM-BO

2017-12-13 Thread Christian König

Am 13.12.2017 um 06:17 schrieb Roger He:

Change-Id: I0c6ece0decd18d30ccc94e5c7ca106d351941c62
Signed-off-by: Roger He 
---
  drivers/gpu/drm/ttm/ttm_bo.c | 12 
  1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 098b22e..e7438b0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -707,7 +707,6 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
  EXPORT_SYMBOL(ttm_bo_eviction_valuable);
  
  static int ttm_mem_evict_first(struct ttm_bo_device *bdev,

-  struct reservation_object *resv,
   uint32_t mem_type,
   const struct ttm_place *place,
   struct ttm_operation_ctx *ctx)
@@ -722,10 +721,8 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
-   if (bo->resv == resv) {
-   if (list_empty(&bo->ddestroy))
-   continue;
-   } else {
+   if (!ctx->allow_reserved_eviction ||
+   bo->resv != ctx->resv) {


That will still disable destroyed BO handling during command submission 
which isn't a good idea I think.


Apart from that the "bo->resv != ctx->resv" is still indented to far to 
the right. This makes it look like it belongs to the "locked = ..." on 
the line below.


Christian.


locked = reservation_object_trylock(bo->resv);
if (!locked)
continue;
@@ -835,7 +832,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object 
*bo,
return ret;
if (mem->mm_node)
break;
-   ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, ctx);
+   ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -1332,8 +1329,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device 
*bdev,
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
-   ret = ttm_mem_evict_first(bdev, NULL, mem_type,
- NULL, &ctx);
+   ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
if (ret)
return ret;
spin_lock(&glob->lru_lock);


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4/6] drm/ttm: init locked again to prevent incorrect unlock

2017-12-13 Thread Christian König

Am 13.12.2017 um 03:06 schrieb He, Roger:

That is a bug fix, isn't it? If yes maybe add CC:stable and commit it 
first before all other patches.

Fortunately, so far there is no issue directly resulting from that.


Yeah, but that is irrelevant. Patches are classified as fix if they fix 
something, not if the bug was ever hit.


Not sure if the code is already upstream, but we should still make sure 
that we send this patch to the appropriate places.


Christian.



Thanks
Roger(Hongbo.He)

-Original Message-
From: Christian König [mailto:ckoenig.leichtzumer...@gmail.com]
Sent: Tuesday, December 12, 2017 6:37 PM
To: He, Roger ; amd-gfx@lists.freedesktop.org; 
dri-de...@lists.freedesktop.org
Subject: Re: [PATCH 4/6] drm/ttm: init locked again to prevent incorrect unlock

Am 12.12.2017 um 10:33 schrieb Roger He:

Change-Id: I8db51d843955f5db14bb4bbff892eaedbd9f0abe
Signed-off-by: Roger He 

Reviewed-by: Christian König 

That is a bug fix, isn't it? If yes maybe add CC:stable and commit it first 
before all other patches.

Christian.


---
   drivers/gpu/drm/ttm/ttm_bo.c | 1 +
   1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c
b/drivers/gpu/drm/ttm/ttm_bo.c index 17fe8be..eb8c568 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -735,6 +735,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
  place)) {
if (locked)
reservation_object_unlock(bo->resv);
+   locked = false;
continue;
}
break;


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: fix NULL err for sriov detect

2017-12-13 Thread Chunming Zhou
[   21.841536] BUG: KASAN: null-ptr-deref in soc15_set_ip_blocks+0x4f/0x2e0 
[amdgpu]

Change-Id: I182dfed95c362123a75feafe44fa2ad3f3f35cac
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a16e8d9a8fa2..49ff552cd6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -521,6 +521,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
}
 
+   if (adev->flags & AMD_IS_APU)
+   adev->nbio_funcs = &nbio_v7_0_funcs;
+   else
+   adev->nbio_funcs = &nbio_v6_1_funcs;
+
adev->nbio_funcs->detect_hw_virt(adev);
 
if (amdgpu_sriov_vf(adev))
@@ -611,11 +616,6 @@ static int soc15_common_early_init(void *handle)
 
adev->asic_funcs = &soc15_asic_funcs;
 
-   if (adev->flags & AMD_IS_APU)
-   adev->nbio_funcs = &nbio_v7_0_funcs;
-   else
-   adev->nbio_funcs = &nbio_v6_1_funcs;
-
if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
psp_enabled = true;
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx