Re: [PATCH 2/4] drm/amdgpu: minor optimize VM moved handling v2

2018-01-03 Thread Zhou, David(ChunMing)


On 2018年01月02日 22:47, Christian König wrote:
> Try to lock moved BOs if it's successful we can update the
> PTEs directly to the new location.
>
> v2: rebase
>
> Signed-off-by: Christian König 
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++-
>   1 file changed, 14 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 3632c69f1814..c1c5ccdee783 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -1697,18 +1697,31 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
>   spin_lock(&vm->status_lock);
>   while (!list_empty(&vm->moved)) {
>   struct amdgpu_bo_va *bo_va;
> + struct reservation_object *resv;
>   
>   bo_va = list_first_entry(&vm->moved,
>   struct amdgpu_bo_va, base.vm_status);
>   spin_unlock(&vm->status_lock);
>   
> + resv = bo_va->base.bo->tbo.resv;
> +
>   /* Per VM BOs never need to bo cleared in the page tables */
This reminds us that Per-VM-BOs need to be cleared as well after we allow
evicting/swapping out per-VM-BOs.

Regards,
David Zhou
> - clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv;
> + if (resv == vm->root.base.bo->tbo.resv)
> + clear = false;
> + /* Try to reserve the BO to avoid clearing its ptes */
> + else if (reservation_object_trylock(resv))
> + clear = false;
> + /* Somebody else is using the BO right now */
> + else
> + clear = true;
>   
>   r = amdgpu_vm_bo_update(adev, bo_va, clear);
>   if (r)
>   return r;
>   
> + if (!clear && resv != vm->root.base.bo->tbo.resv)
> + reservation_object_unlock(resv);
> +
>   spin_lock(&vm->status_lock);
>   }
>   spin_unlock(&vm->status_lock);

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4/4] drm/amdgpu: simplify huge page handling

2018-01-03 Thread Chunming Zhou



On 2018年01月02日 22:47, Christian König wrote:

Update the PDEs after resetting the huge flag.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 60 ++
  1 file changed, 18 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c1c5ccdee783..81505870eebc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -946,54 +946,38 @@ static void amdgpu_vm_handle_huge_pages(struct 
amdgpu_pte_update_params *p,
unsigned nptes, uint64_t dst,
uint64_t flags)
  {
-   bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
uint64_t pd_addr, pde;
  
  	/* In the case of a mixed PT the PDE must point to it*/

-   if (p->adev->asic_type < CHIP_VEGA10 || p->src ||
-   nptes != AMDGPU_VM_PTE_COUNT(p->adev)) {
-   dst = amdgpu_bo_gpu_offset(entry->base.bo);
-   flags = AMDGPU_PTE_VALID;
-   } else {
+   if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
+   nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
/* Set the huge page flag to stop scanning at this PDE */
flags |= AMDGPU_PDE_PTE;
}
  
-	if (!entry->huge && !(flags & AMDGPU_PDE_PTE))

+   if (!(flags & AMDGPU_PDE_PTE)) {
+   if (entry->huge) {
+   /* Add the entry to the relocated list to update it. */
+   entry->huge = false;
+   spin_lock(&p->vm->status_lock);
+   list_move(&entry->base.vm_status, &p->vm->relocated);
+   spin_unlock(&p->vm->status_lock);
+   }
return;
-   entry->huge = !!(flags & AMDGPU_PDE_PTE);
+   }
  
+	entry->huge = true;

amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
   &dst, &flags);
  
-	if (use_cpu_update) {

-   /* In case a huge page is replaced with a system
-* memory mapping, p->pages_addr != NULL and
-* amdgpu_vm_cpu_set_ptes would try to translate dst
-* through amdgpu_vm_map_gart. But dst is already a
-* GPU address (of the page table). Disable
-* amdgpu_vm_map_gart temporarily.
-*/
-   dma_addr_t *tmp;
-
-   tmp = p->pages_addr;
-   p->pages_addr = NULL;
-
-   pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
-   pde = pd_addr + (entry - parent->entries) * 8;
-   amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
-
-   p->pages_addr = tmp;
-   } else {
-   if (parent->base.bo->shadow) {
-   pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
-   pde = pd_addr + (entry - parent->entries) * 8;
-   amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
-   }
-   pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+   if (parent->base.bo->shadow) {
+   pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
pde = pd_addr + (entry - parent->entries) * 8;
-   amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+   p->func(p, pde, dst, 1, 0, flags);
}
+   pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+   pde = pd_addr + (entry - parent->entries) * 8;
+   p->func(p, pde, dst, 1, 0, flags);
  }
  
  /**

@@ -1205,12 +1189,6 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
  
-	/* one PDE write for each huge page */

-   if (vm->root.base.bo->shadow)
-   ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6 * 2;
-   else
-   ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
I didn't get why this is removed. Although you moved the PDE update to
after the huge flag is reset, in the case where the huge flag is not
reset we still do the huge page PDE update, don't we?


Regards,
David Zhou

-
if (pages_addr) {
/* copy commands needed */
ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
@@ -1285,8 +1263,6 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,
  
  error_free:

amdgpu_job_free(job);
-   amdgpu_vm_invalidate_level(adev, vm, &vm->root,
-  adev->vm_manager.root_level);
return r;
  }
  


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


BUG: KASAN: use-after-free in amdgpu_job_free_cb

2018-01-03 Thread Johannes Hirte
I still get a use-after-free with linux-4.15-rc6:

[   16.788943] 
==
[   16.788968] BUG: KASAN: use-after-free in amdgpu_job_free_cb+0x140/0x150
[   16.788975] Read of size 8 at addr 8803dfe4b3c8 by task kworker/0:2/1355

[   16.788986] CPU: 0 PID: 1355 Comm: kworker/0:2 Not tainted 4.15.0-rc6 #438
[   16.788990] Hardware name: HP HP ProBook 645 G2/80FE, BIOS N77 Ver. 01.10 
10/12/2017
[   16.788998] Workqueue: events amd_sched_job_finish
[   16.789003] Call Trace:
[   16.789012]  dump_stack+0x99/0x11e
[   16.789018]  ? _atomic_dec_and_lock+0x152/0x152
[   16.789026]  print_address_description+0x65/0x270
[   16.789032]  kasan_report+0x272/0x360
[   16.789038]  ? amdgpu_job_free_cb+0x140/0x150
[   16.789043]  amdgpu_job_free_cb+0x140/0x150
[   16.789049]  amd_sched_job_finish+0x288/0x560
[   16.789055]  ? amd_sched_process_job+0x220/0x220
[   16.789061]  ? __queue_delayed_work+0x211/0x360
[   16.789067]  ? pick_next_task_fair+0xcff/0x10f0
[   16.789073]  ? _raw_spin_unlock_irq+0xbe/0x120
[   16.789077]  ? _raw_spin_unlock+0x120/0x120
[   16.789082]  process_one_work+0x84b/0x1600
[   16.789088]  ? tick_nohz_dep_clear_signal+0x20/0x20
[   16.789093]  ? _raw_spin_unlock_irq+0xbe/0x120
[   16.789097]  ? _raw_spin_unlock+0x120/0x120
[   16.789101]  ? pwq_dec_nr_in_flight+0x3c0/0x3c0
[   16.789107]  ? compat_start_thread+0x70/0x70
[   16.789111]  ? cyc2ns_read_end+0x20/0x20
[   16.789117]  ? finish_task_switch+0x27d/0x7f0
[   16.789121]  ? wq_worker_waking_up+0xc0/0xc0
[   16.789127]  ? sched_clock_cpu+0x18/0x1e0
[   16.789133]  ? task_change_group_fair+0x7e0/0x7e0
[   16.789139]  ? pci_mmcfg_check_reserved+0x100/0x100
[   16.789143]  ? load_balance+0x3120/0x3120
[   16.789148]  ? perf_event_exit_task+0x91f/0xe20
[   16.789156]  ? schedule+0xfb/0x3b0
[   16.789160]  ? __schedule+0x19b0/0x19b0
[   16.789165]  ? _raw_spin_unlock_irq+0xb9/0x120
[   16.789169]  ? _raw_spin_unlock_irq+0xbe/0x120
[   16.789172]  ? _raw_spin_unlock+0x120/0x120
[   16.789177]  worker_thread+0x211/0x1790
[   16.789184]  ? pick_next_task_fair+0x97d/0x10f0
[   16.789188]  ? trace_event_raw_event_workqueue_work+0x170/0x170
[   16.789194]  ? tick_nohz_dep_clear_signal+0x20/0x20
[   16.789199]  ? _raw_spin_unlock_irq+0xbe/0x120
[   16.789202]  ? _raw_spin_unlock+0x120/0x120
[   16.789207]  ? compat_start_thread+0x70/0x70
[   16.789212]  ? finish_task_switch+0x27d/0x7f0
[   16.789217]  ? sched_clock_cpu+0x18/0x1e0
[   16.789223]  ? ret_from_fork+0x1f/0x30
[   16.789228]  ? pci_mmcfg_check_reserved+0x100/0x100
[   16.789233]  ? get_task_cred+0x210/0x210
[   16.789238]  ? cyc2ns_read_end+0x20/0x20
[   16.789245]  ? schedule+0xfb/0x3b0
[   16.789249]  ? __schedule+0x19b0/0x19b0
[   16.789254]  ? remove_wait_queue+0x2b0/0x2b0
[   16.789258]  ? arch_vtime_task_switch+0xee/0x190
[   16.789263]  ? _raw_spin_unlock_irqrestore+0xc2/0x130
[   16.789267]  ? _raw_spin_unlock_irq+0x120/0x120
[   16.789273]  ? trace_event_raw_event_workqueue_work+0x170/0x170
[   16.789277]  kthread+0x2d4/0x390
[   16.789282]  ? kthread_create_worker+0xd0/0xd0
[   16.789286]  ? umh_complete+0x60/0x60
[   16.789290]  ret_from_fork+0x1f/0x30

[   16.789298] Allocated by task 2385:
[   16.789304]  kasan_kmalloc+0xa0/0xd0
[   16.789309]  kmem_cache_alloc_trace+0xd1/0x1e0
[   16.789314]  amdgpu_driver_open_kms+0x12b/0x4d0
[   16.789320]  drm_open+0x7c3/0x1100
[   16.789324]  drm_stub_open+0x2a8/0x400
[   16.789329]  chrdev_open+0x1eb/0x5a0
[   16.789333]  do_dentry_open+0x5a1/0xc50
[   16.789337]  path_openat+0x11d3/0x4e90
[   16.789341]  do_filp_open+0x239/0x3c0
[   16.789344]  do_sys_open+0x402/0x630
[   16.789349]  do_syscall_64+0x220/0x670
[   16.789353]  return_from_SYSCALL_64+0x0/0x65

[   16.789357] Freed by task 2541:
[   16.789362]  kasan_slab_free+0x71/0xc0
[   16.789365]  kfree+0x88/0x1b0
[   16.789369]  amdgpu_driver_postclose_kms+0x469/0x860
[   16.789373]  drm_release+0x8a8/0x1180
[   16.789377]  __fput+0x2ab/0x730
[   16.789380]  task_work_run+0x14b/0x200
[   16.789384]  exit_to_usermode_loop+0x151/0x180
[   16.789387]  do_syscall_64+0x4ed/0x670
[   16.789391]  return_from_SYSCALL_64+0x0/0x65

[   16.789397] The buggy address belongs to the object at 8803dfe4b300
[   16.789403] The buggy address is located 200 bytes inside of
[   16.789406] The buggy address belongs to the page:
[   16.789413] page:4ccd276f count:1 mapcount:0 mapping:  
(null) index:0x0 compound_mapcount: 0
[   16.789421] flags: 0x20008100(slab|head)
[   16.789428] raw: 20008100   
0001000f000f
[   16.789433] raw: dead0100 dead0200 8803f3002a80 

[   16.789436] page dumped because: kasan: bad access detected

[   16.789441] Memory state around the buggy address:
[   16.789445]  8803dfe4b280: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc 
fc
[   16.789449]  8803dfe4b300: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb 
fb

Re: [PATCH 4/4] drm/amdgpu: simplify huge page handling

2018-01-03 Thread Christian König

Am 03.01.2018 um 09:29 schrieb Chunming Zhou:



On 2018年01月02日 22:47, Christian König wrote:

Update the PDEs after resetting the huge flag.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 60 
++

  1 file changed, 18 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index c1c5ccdee783..81505870eebc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -946,54 +946,38 @@ static void amdgpu_vm_handle_huge_pages(struct 
amdgpu_pte_update_params *p,

  unsigned nptes, uint64_t dst,
  uint64_t flags)
  {
-    bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
  uint64_t pd_addr, pde;
    /* In the case of a mixed PT the PDE must point to it*/
-    if (p->adev->asic_type < CHIP_VEGA10 || p->src ||
-    nptes != AMDGPU_VM_PTE_COUNT(p->adev)) {
-    dst = amdgpu_bo_gpu_offset(entry->base.bo);
-    flags = AMDGPU_PTE_VALID;
-    } else {
+    if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
+    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
  /* Set the huge page flag to stop scanning at this PDE */
  flags |= AMDGPU_PDE_PTE;
  }
  -    if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
+    if (!(flags & AMDGPU_PDE_PTE)) {
+    if (entry->huge) {
+    /* Add the entry to the relocated list to update it. */
+    entry->huge = false;
+    spin_lock(&p->vm->status_lock);
+    list_move(&entry->base.vm_status, &p->vm->relocated);
+    spin_unlock(&p->vm->status_lock);
+    }
  return;
-    entry->huge = !!(flags & AMDGPU_PDE_PTE);
+    }
  +    entry->huge = true;
  amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
 &dst, &flags);
  -    if (use_cpu_update) {
-    /* In case a huge page is replaced with a system
- * memory mapping, p->pages_addr != NULL and
- * amdgpu_vm_cpu_set_ptes would try to translate dst
- * through amdgpu_vm_map_gart. But dst is already a
- * GPU address (of the page table). Disable
- * amdgpu_vm_map_gart temporarily.
- */
-    dma_addr_t *tmp;
-
-    tmp = p->pages_addr;
-    p->pages_addr = NULL;
-
-    pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
-    pde = pd_addr + (entry - parent->entries) * 8;
-    amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
-
-    p->pages_addr = tmp;
-    } else {
-    if (parent->base.bo->shadow) {
-    pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
-    pde = pd_addr + (entry - parent->entries) * 8;
-    amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
-    }
-    pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+    if (parent->base.bo->shadow) {
+    pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
  pde = pd_addr + (entry - parent->entries) * 8;
-    amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+    p->func(p, pde, dst, 1, 0, flags);
  }
+    pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+    pde = pd_addr + (entry - parent->entries) * 8;
+    p->func(p, pde, dst, 1, 0, flags);
  }
    /**
@@ -1205,12 +1189,6 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,

  /* padding, etc. */
  ndw = 64;
  -    /* one PDE write for each huge page */
-    if (vm->root.base.bo->shadow)
-    ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6 * 2;
-    else
-    ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
I didn't get why this is removed. Although you moved the PDE update to
after the huge flag is reset, in the case where the huge flag is not
reset we still do the huge page PDE update, don't we?


Yeah, but we don't update the PTEs any more. So we have plenty of free 
space in the IB for the PDE update.


Regards,
Christian.



Regards,
David Zhou

-
  if (pages_addr) {
  /* copy commands needed */
  ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
@@ -1285,8 +1263,6 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,

    error_free:
  amdgpu_job_free(job);
-    amdgpu_vm_invalidate_level(adev, vm, &vm->root,
-   adev->vm_manager.root_level);
  return r;
  }




___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/4] drm/amdgpu: minor optimize VM moved handling v2

2018-01-03 Thread Christian König

Am 03.01.2018 um 09:10 schrieb Zhou, David(ChunMing):


On 2018年01月02日 22:47, Christian König wrote:

Try to lock moved BOs if it's successful we can update the
PTEs directly to the new location.

v2: rebase

Signed-off-by: Christian König 
---
   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++-
   1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3632c69f1814..c1c5ccdee783 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1697,18 +1697,31 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
struct amdgpu_bo_va *bo_va;
+   struct reservation_object *resv;
   
   		bo_va = list_first_entry(&vm->moved,

struct amdgpu_bo_va, base.vm_status);
spin_unlock(&vm->status_lock);
   
+		resv = bo_va->base.bo->tbo.resv;

+
/* Per VM BOs never need to bo cleared in the page tables */

This reminds us that Per-VM-BOs need to be cleared as well after we allow
evicting/swapping out per-VM-BOs.


Actually they don't. The page tables only need to be valid during CS.

So what happens is that the per-VM BOs are validated right before we
call amdgpu_vm_handle_moved().


Regards,
Christian.



Regards,
David Zhou

-   clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv;
+   if (resv == vm->root.base.bo->tbo.resv)
+   clear = false;
+   /* Try to reserve the BO to avoid clearing its ptes */
+   else if (reservation_object_trylock(resv))
+   clear = false;
+   /* Somebody else is using the BO right now */
+   else
+   clear = true;
   
   		r = amdgpu_vm_bo_update(adev, bo_va, clear);

if (r)
return r;
   
+		if (!clear && resv != vm->root.base.bo->tbo.resv)

+   reservation_object_unlock(resv);
+
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: BUG: KASAN: use-after-free in amdgpu_job_free_cb

2018-01-03 Thread Johannes Hirte
On 2018 Jan 03, Johannes Hirte wrote:
> This should be fixed already with 
> https://lists.freedesktop.org/archives/amd-gfx/2017-October/014932.html
> but it's still missing upstream.
> 

With this patch, the use-after-free in amdgpu_job_free_cb seems to be
gone. But now I get an use-after-free in
drm_atomic_helper_wait_for_flip_done:

[89387.069387] 
==
[89387.069407] BUG: KASAN: use-after-free in 
drm_atomic_helper_wait_for_flip_done+0x24f/0x270
[89387.069413] Read of size 8 at addr 880124df0688 by task 
kworker/u8:3/31426

[89387.069423] CPU: 1 PID: 31426 Comm: kworker/u8:3 Not tainted 
4.15.0-rc6-1-ge0895ba8d88e #442
[89387.069427] Hardware name: HP HP ProBook 645 G2/80FE, BIOS N77 Ver. 01.10 
10/12/2017
[89387.069435] Workqueue: events_unbound commit_work
[89387.069440] Call Trace:
[89387.069448]  dump_stack+0x99/0x11e
[89387.069453]  ? _atomic_dec_and_lock+0x152/0x152
[89387.069460]  print_address_description+0x65/0x270
[89387.069465]  kasan_report+0x272/0x360
[89387.069470]  ? drm_atomic_helper_wait_for_flip_done+0x24f/0x270
[89387.069475]  drm_atomic_helper_wait_for_flip_done+0x24f/0x270
[89387.069483]  amdgpu_dm_atomic_commit_tail+0x185e/0x2b90
[89387.069492]  ? dm_crtc_duplicate_state+0x130/0x130
[89387.069498]  ? drm_atomic_helper_wait_for_dependencies+0x3f2/0x800
[89387.069504]  commit_tail+0x92/0xe0
[89387.069511]  process_one_work+0x84b/0x1600
[89387.069517]  ? tick_nohz_dep_clear_signal+0x20/0x20
[89387.069522]  ? _raw_spin_unlock_irq+0xbe/0x120
[89387.069525]  ? _raw_spin_unlock+0x120/0x120
[89387.069529]  ? pwq_dec_nr_in_flight+0x3c0/0x3c0
[89387.069534]  ? arch_vtime_task_switch+0xee/0x190
[89387.069539]  ? finish_task_switch+0x27d/0x7f0
[89387.069542]  ? wq_worker_waking_up+0xc0/0xc0
[89387.069547]  ? copy_overflow+0x20/0x20
[89387.069550]  ? sched_clock_cpu+0x18/0x1e0
[89387.069558]  ? pci_mmcfg_check_reserved+0x100/0x100
[89387.069562]  ? pci_mmcfg_check_reserved+0x100/0x100
[89387.069569]  ? schedule+0xfb/0x3b0
[89387.069574]  ? __schedule+0x19b0/0x19b0
[89387.069578]  ? _raw_spin_unlock_irq+0xb9/0x120
[89387.069582]  ? _raw_spin_unlock_irq+0xbe/0x120
[89387.069585]  ? _raw_spin_unlock+0x120/0x120
[89387.069590]  worker_thread+0x211/0x1790
[89387.069597]  ? pick_next_task_fair+0x313/0x10f0
[89387.069601]  ? trace_event_raw_event_workqueue_work+0x170/0x170
[89387.069606]  ? __read_once_size_nocheck.constprop.6+0x10/0x10
[89387.069612]  ? tick_nohz_dep_clear_signal+0x20/0x20
[89387.069616]  ? account_idle_time+0x94/0x1f0
[89387.069620]  ? _raw_spin_unlock_irq+0xbe/0x120
[89387.069623]  ? _raw_spin_unlock+0x120/0x120
[89387.069628]  ? finish_task_switch+0x27d/0x7f0
[89387.069633]  ? sched_clock_cpu+0x18/0x1e0
[89387.069639]  ? ret_from_fork+0x1f/0x30
[89387.069644]  ? pci_mmcfg_check_reserved+0x100/0x100
[89387.069650]  ? cyc2ns_read_end+0x20/0x20
[89387.069657]  ? schedule+0xfb/0x3b0
[89387.069662]  ? __schedule+0x19b0/0x19b0
[89387.069666]  ? remove_wait_queue+0x2b0/0x2b0
[89387.069670]  ? arch_vtime_task_switch+0xee/0x190
[89387.069675]  ? _raw_spin_unlock_irqrestore+0xc2/0x130
[89387.069679]  ? _raw_spin_unlock_irq+0x120/0x120
[89387.069683]  ? trace_event_raw_event_workqueue_work+0x170/0x170
[89387.069688]  kthread+0x2d4/0x390
[89387.069693]  ? kthread_create_worker+0xd0/0xd0
[89387.069697]  ret_from_fork+0x1f/0x30

[89387.069705] Allocated by task 2387:
[89387.069712]  kasan_kmalloc+0xa0/0xd0
[89387.069717]  kmem_cache_alloc_trace+0xd1/0x1e0
[89387.069722]  dm_crtc_duplicate_state+0x73/0x130
[89387.069726]  drm_atomic_get_crtc_state+0x13c/0x400
[89387.069730]  page_flip_common+0x52/0x230
[89387.069734]  drm_atomic_helper_page_flip+0xa1/0x100
[89387.069739]  drm_mode_page_flip_ioctl+0xc10/0x1030
[89387.069744]  drm_ioctl_kernel+0x1b5/0x2c0
[89387.069748]  drm_ioctl+0x709/0xa00
[89387.069752]  amdgpu_drm_ioctl+0x118/0x280
[89387.069756]  do_vfs_ioctl+0x18a/0x1260
[89387.069760]  SyS_ioctl+0x6f/0x80
[89387.069764]  do_syscall_64+0x220/0x670
[89387.069768]  return_from_SYSCALL_64+0x0/0x65

[89387.069772] Freed by task 2533:
[89387.069776]  kasan_slab_free+0x71/0xc0
[89387.069780]  kfree+0x88/0x1b0
[89387.069784]  drm_atomic_state_default_clear+0x2c8/0xa00
[89387.069787]  __drm_atomic_state_free+0x30/0xd0
[89387.069791]  drm_atomic_helper_update_plane+0xb6/0x350
[89387.069794]  __setplane_internal+0x5b4/0x9d0
[89387.069798]  drm_mode_cursor_universal+0x412/0xc60
[89387.069801]  drm_mode_cursor_common+0x4b6/0x890
[89387.069805]  drm_mode_cursor_ioctl+0xd3/0x120
[89387.069809]  drm_ioctl_kernel+0x1b5/0x2c0
[89387.069813]  drm_ioctl+0x709/0xa00
[89387.069816]  amdgpu_drm_ioctl+0x118/0x280
[89387.069819]  do_vfs_ioctl+0x18a/0x1260
[89387.069822]  SyS_ioctl+0x6f/0x80
[89387.069824]  do_syscall_64+0x220/0x670
[89387.069828]  return_from_SYSCALL_64+0x0/0x65

[89387.069834] The buggy address belongs to the object at 880124df0480
[89387.069839] The buggy address is located 520 bytes inside of
[89387.069843] The buggy addres

Re: Deadlocks with multiple applications on AMD RX 460 and RX 550 - Update 2

2018-01-03 Thread Christian König

Hi Luis,

In general please add information like /proc/iomem and dmesg as 
attachment and not mangled inside the mail.


The good news is that your ARM board at least has a memory layout which 
should work in theory. So at least one problem rules out.


I don't think that apitrace would be very helpful in this case as long
as no developer has access to one of those ARM boards. But it is
interesting that the apitrace reliably reproduces the issue. This means
that it isn't something random, but rather a specific timing of things.


Regards,
Christian.

Am 03.01.2018 um 01:36 schrieb Luís Mendes:

Just a small update, regarding to what I have posted...

I've made additional tests with mesa-17.4 at commit "radv: Implement
binning on GFX9" - 6a36bfc64d2096aa338958c4605f5fc6372c07b8 and I was
able to gather a smaller apitrace of kodi playing a video with about
1GB that hangs the GPU, almost always, when replayed with glretrace if
without the option --singlethread. If option --singlethread is used,
when doing glretrace, no gpu hang occurs, ever, it seems.

For some reason now I am getting past the lightdm login screen without
issues, maybe some of the suggested changes improved the behaviour
with mesa-17.4, however with mesa-17.3.1 I didn't have those issue
anyway.

Now both mesa-17.3.1 and mesa-17.4 behave similarly, blocking while
playing video with kodi, but is also possible to cause the gpu hang
with other applications.
On the other hand pure openGL application seem to work fine... I am
able to run glmark2 tests without issues.

How can I send these apitraces?

On Tue, Jan 2, 2018 at 10:29 PM, Luís Mendes  wrote:

Ok... I've done some of the suggested tests.

I still haven't tested on x86, but I'll get to that.

I've recompiled the kernel to disable Power Management as much as
possible at all levels, including the PCIe, I've also modified
/include/drm/drm_cache.h - static inline bool
drm_arch_can_wc_memory(void) to always return false, but neither
solved the issue.

When I run kodi under apitrace with mesa 17.3.1 it becomes much more
difficult to reproduce the crash, there are a lot of missed frames due
to the CPU overload of apitrace, but I was to able to crash the GPU
once. The apitrace log has 2.3GB, how should I send it?
It happened while playing a VP9 encoded webm video file, which is
decoded by software, as RX 460 is unable to hardware decode this codec
AFAIK. In fact software decoded videos are more prone to produce the
GPU hang, while a H265 4K hardware decoded video never causes a GPU
hang. I'm affraid I forgot to have kodi to log the execution data when
I did the apitrace.


The full dmesg is presented below as well as the /proc/iomem
information and lspci output.
  I just want to note that I'm having EDID DDC errors with my TV
screen, because at some point in kernel 4.14 onwards, both the RX460
as well as the RX550 cards started to corrupt the I2C TV screen EDID
memory, so that I have to reflash the correct EDID data to get the
screen back to its own configuration. This is a rare problem that only
occurs with this TV. All other TVs and monitors that I've tested don't
show this EDID corruption issue. I currently have stopped to reflash
the I2C EDID configuration memory of my TV to avoid exceeding the
memory write cycles endurance, instead I now modify gpu/drm/drm_edid.c
in function drm_do_get_edid() to allow the corrupted EDID to pass and
enter X. So please ignore the EDID error warnings on my dmesg log. The
GPU hangs occur just the same, even when I have the correct EDID, as
it is an unrelated issue.

Regards,
Luís

iomem shows this:
-3fff : System RAM
   8000-00ef : Kernel code
   0100-010e3913 : Kernel data
d000-efff : PCI MEM
   d000-e7ff : PCI Bus :01
 d000-dfff : :01:00.0
 e000-e01f : :01:00.0
 e020-e023 : :01:00.0
 e024-e025 : :01:00.0
 e026-e0263fff : :01:00.1
   e026-e0263fff : ICH HD audio
f1010680-f10106cf : spi@10680
f1011000-f101101f : i2c@11000
f1011100-f10f : i2c@11100
f1012000-f101201f : serial
f1012100-f101211f : serial
f1018000-f101801f : pinctrl@18000
f1018100-f101813f : gpio
f1018140-f101817f : gpio
f1018454-f1018457 : conf-sdio3
f10184a0-f10184ab : rtc-soc
f1020704-f1020707 : watchdog@20300
f1020800-f102080f : cpurst@20800
f1020a00-f1020ccf : interrupt-controller@20a00
f1021070-f10210c7 : interrupt-controller@20a00
f1022000-f1022fff : pmsu@22000
f103-f1033fff : ethernet@3
f1034000-f1037fff : ethernet@34000
f104-f1041fff : pcie@2,0
f1044000-f1045fff : pcie@3,0
f1058000-f10584ff : usb@58000
f107-f1073fff : ethernet@7
f10a3800-f10a381f : rtc
f10a8000-f10a9fff : sata@a8000
f10d8000-f10d8fff : sdhci
f10e-f10e1fff : sata@e
f10e4074-f10e4077 : thermal@e8078
f10e4078-f10e407b : thermal@e8078
f10f-f10f3fff : usb3@f
f10f8000-f10fbfff : usb3@f8000
f110-f11007ff : f110.sa-sram0
f111-f11107ff : f111.sa-sram

Re: [PATCH 4/4] drm/amdgpu: simplify huge page handling

2018-01-03 Thread Chunming Zhou



On 2018年01月03日 17:24, Christian König wrote:

Am 03.01.2018 um 09:29 schrieb Chunming Zhou:



On 2018年01月02日 22:47, Christian König wrote:

Update the PDEs after resetting the huge flag.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 60 
++

  1 file changed, 18 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index c1c5ccdee783..81505870eebc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -946,54 +946,38 @@ static void amdgpu_vm_handle_huge_pages(struct 
amdgpu_pte_update_params *p,

  unsigned nptes, uint64_t dst,
  uint64_t flags)
  {
-    bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
  uint64_t pd_addr, pde;
    /* In the case of a mixed PT the PDE must point to it*/
-    if (p->adev->asic_type < CHIP_VEGA10 || p->src ||
-    nptes != AMDGPU_VM_PTE_COUNT(p->adev)) {
-    dst = amdgpu_bo_gpu_offset(entry->base.bo);
-    flags = AMDGPU_PTE_VALID;
-    } else {
+    if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
+    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
  /* Set the huge page flag to stop scanning at this PDE */
  flags |= AMDGPU_PDE_PTE;
  }
  -    if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
+    if (!(flags & AMDGPU_PDE_PTE)) {
+    if (entry->huge) {
+    /* Add the entry to the relocated list to update it. */
+    entry->huge = false;
+    spin_lock(&p->vm->status_lock);
+    list_move(&entry->base.vm_status, &p->vm->relocated);
+    spin_unlock(&p->vm->status_lock);
+    }
  return;
-    entry->huge = !!(flags & AMDGPU_PDE_PTE);
+    }
  +    entry->huge = true;
  amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
 &dst, &flags);
  -    if (use_cpu_update) {
-    /* In case a huge page is replaced with a system
- * memory mapping, p->pages_addr != NULL and
- * amdgpu_vm_cpu_set_ptes would try to translate dst
- * through amdgpu_vm_map_gart. But dst is already a
- * GPU address (of the page table). Disable
- * amdgpu_vm_map_gart temporarily.
- */
-    dma_addr_t *tmp;
-
-    tmp = p->pages_addr;
-    p->pages_addr = NULL;
-
-    pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
-    pde = pd_addr + (entry - parent->entries) * 8;
-    amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
-
-    p->pages_addr = tmp;
-    } else {
-    if (parent->base.bo->shadow) {
-    pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
-    pde = pd_addr + (entry - parent->entries) * 8;
-    amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
-    }
-    pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+    if (parent->base.bo->shadow) {
+    pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
  pde = pd_addr + (entry - parent->entries) * 8;
-    amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+    p->func(p, pde, dst, 1, 0, flags);
  }
+    pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+    pde = pd_addr + (entry - parent->entries) * 8;
+    p->func(p, pde, dst, 1, 0, flags);
  }
    /**
@@ -1205,12 +1189,6 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,

  /* padding, etc. */
  ndw = 64;
  -    /* one PDE write for each huge page */
-    if (vm->root.base.bo->shadow)
-    ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6 * 2;
-    else
-    ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
I didn't get why this is removed. Although you moved the PDE update to
after the huge flag is reset, in the case where the huge flag is not
reset we still do the huge page PDE update, don't we?


Yeah, but we don't update the PTEs any more. So we have plenty of free 
space in the IB for the PDE update.

OK, got it, thanks for the explanation.

patch#3 and #4 are Reviewed-by: Chunming Zhou 



Regards,
Christian.



Regards,
David Zhou

-
  if (pages_addr) {
  /* copy commands needed */
  ndw += ncmds * 
adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
@@ -1285,8 +1263,6 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,

    error_free:
  amdgpu_job_free(job);
-    amdgpu_vm_invalidate_level(adev, vm, &vm->root,
-   adev->vm_manager.root_level);
  return r;
  }






___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/4] drm/amdgpu: minor optimize VM moved handling v2

2018-01-03 Thread Chunming Zhou



On 2018年01月03日 17:25, Christian König wrote:

Am 03.01.2018 um 09:10 schrieb Zhou, David(ChunMing):


On 2018年01月02日 22:47, Christian König wrote:

Try to lock moved BOs if it's successful we can update the
PTEs directly to the new location.

v2: rebase

Signed-off-by: Christian König 
---
   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++-
   1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 3632c69f1814..c1c5ccdee783 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1697,18 +1697,31 @@ int amdgpu_vm_handle_moved(struct 
amdgpu_device *adev,

   spin_lock(&vm->status_lock);
   while (!list_empty(&vm->moved)) {
   struct amdgpu_bo_va *bo_va;
+    struct reservation_object *resv;
      bo_va = list_first_entry(&vm->moved,
   struct amdgpu_bo_va, base.vm_status);
   spin_unlock(&vm->status_lock);
   +    resv = bo_va->base.bo->tbo.resv;
+
   /* Per VM BOs never need to bo cleared in the page tables */

This reminds us that per-VM BOs need to be cleared as well after we allow 
evicting/swapping out per-VM BOs.


Actually they don't. The page tables only need to be valid during CS.

So what happens is that the per VM-BOs are validated in right before 
we call amdgpu_vm_handle_moved().
Yeah, agree it for per-vm-bo situation after I checked all adding moved 
list cases:

1. validate pt bos
2. bo invalidate
3. insert_map for per-vm-bo
item #1 and #3 both are per-vm-bo, they are already validated before 
handle_moved().


For item #2, there are three places to call it:
a. amdgpu_bo_vm_update_pte in CS for amdgpu_vm_debug
b. amdgpu_gem_op_ioctl, but it is for evicted list, nothing with moved list.
c. amdgpu_bo_move_notify when bo validate.

For c case, your optimization is valid, we don't need clear for validate bo.
But for a case, yours will break amdgpu_vm_debug functionality.

Right?

Regards,
David Zhou



Regards,
Christian.



Regards,
David Zhou
-    clear = bo_va->base.bo->tbo.resv != 
vm->root.base.bo->tbo.resv;

+    if (resv == vm->root.base.bo->tbo.resv)
+    clear = false;
+    /* Try to reserve the BO to avoid clearing its ptes */
+    else if (reservation_object_trylock(resv))
+    clear = false;
+    /* Somebody else is using the BO right now */
+    else
+    clear = true;
      r = amdgpu_vm_bo_update(adev, bo_va, clear);
   if (r)
   return r;
   +    if (!clear && resv != vm->root.base.bo->tbo.resv)
+    reservation_object_unlock(resv);
+
   spin_lock(&vm->status_lock);
   }
   spin_unlock(&vm->status_lock);




___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/4] drm/amdgpu: minor optimize VM moved handling v2

2018-01-03 Thread Christian König

Am 03.01.2018 um 11:43 schrieb Chunming Zhou:



On 2018年01月03日 17:25, Christian König wrote:

Am 03.01.2018 um 09:10 schrieb Zhou, David(ChunMing):


On 2018年01月02日 22:47, Christian König wrote:

Try to lock moved BOs if it's successful we can update the
PTEs directly to the new location.

v2: rebase

Signed-off-by: Christian König 
---
   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++-
   1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 3632c69f1814..c1c5ccdee783 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1697,18 +1697,31 @@ int amdgpu_vm_handle_moved(struct 
amdgpu_device *adev,

   spin_lock(&vm->status_lock);
   while (!list_empty(&vm->moved)) {
   struct amdgpu_bo_va *bo_va;
+    struct reservation_object *resv;
      bo_va = list_first_entry(&vm->moved,
   struct amdgpu_bo_va, base.vm_status);
   spin_unlock(&vm->status_lock);
   +    resv = bo_va->base.bo->tbo.resv;
+
   /* Per VM BOs never need to bo cleared in the page 
tables */

This reminders us Per-VM-BOs need to cleared as well after we allow to
evict/swap out per-vm-bos.


Actually they don't. The page tables only need to be valid during CS.

So what happens is that the per VM-BOs are validated in right before 
we call amdgpu_vm_handle_moved().
Yeah, agree it for per-vm-bo situation after I checked all adding 
moved list cases:

1. validate pt bos
2. bo invalidate
3. insert_map for per-vm-bo
item #1 and #3 both are per-vm-bo, they are already validated before 
handle_moved().


For item #2, there are three places to call it:
a. amdgpu_bo_vm_update_pte in CS for amdgpu_vm_debug
b. amdgpu_gem_op_ioctl, but it is for evicted list, nothing with moved 
list.

c. amdgpu_bo_move_notify when bo validate.

For c case, your optimization is valid, we don't need clear for 
validate bo.

But for a case, yours will break amdgpu_vm_debug functionality.

Right?


Interesting point, but no that should be handled as well.

The vm_debug handling is only for the BOs on the BO-list. E.g. per VM 
BOs are never handled here.


Regards,
Christian.



Regards,
David Zhou



Regards,
Christian.



Regards,
David Zhou
-    clear = bo_va->base.bo->tbo.resv != 
vm->root.base.bo->tbo.resv;

+    if (resv == vm->root.base.bo->tbo.resv)
+    clear = false;
+    /* Try to reserve the BO to avoid clearing its ptes */
+    else if (reservation_object_trylock(resv))
+    clear = false;
+    /* Somebody else is using the BO right now */
+    else
+    clear = true;
      r = amdgpu_vm_bo_update(adev, bo_va, clear);
   if (r)
   return r;
   +    if (!clear && resv != vm->root.base.bo->tbo.resv)
+    reservation_object_unlock(resv);
+
   spin_lock(&vm->status_lock);
   }
   spin_unlock(&vm->status_lock);






___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: Deadlocks with multiple applications on AMD RX 460 and RX 550 - Update 2

2018-01-03 Thread Luís Mendes
Hi Christian,

Replies follow in between.

Regards,
Luís

On Wed, Jan 3, 2018 at 9:37 AM, Christian König
 wrote:
> Hi Luis,
>
> In general please add information like /proc/iomem and dmesg as attachment
> and not mangled inside the mail.

Ok, I'll take that into account next time. Sorry for the inconvenience.

>
> The good news is that your ARM board at least has a memory layout which
> should work in theory. So at least one problem rules out.

Ok, nice.

>
> I don't think that apitrace would be much helpful in this case as long as no
> developer has access to one of those ARM boards. But it is interesting that
> the apitrace reliable reproduces the issue. This means that it isn't
> something random, but rather a specific timing of things.

I am afraid, I currently don't have boards that I can send yet. I am
developing one, but it will still take some time, before I have one
ready.

I've checked the apitrace and there is a common call
glXSwapBuffers(dpy=0x1389f00, drawable=52428803) that I believe will
trigger the page flip. I suspect there is a race condition with
glXSwapBuffers in mesa or amdgpu, that corrupts some of the data sent
to the GPU causing a hang.
What I believe it seems to be the case is that the GPU lock up only
happens when doing a page flip, since the kernel locks with:
[  243.693200] kworker/u4:3D089  2 0x
[  243.693232] Workqueue: events_unbound commit_work [drm_kms_helper]
[  243.693251] [<80b8c6d4>] (__schedule) from [<80b8cdd0>] (schedule+0x4c/0xac)
[  243.693259] [<80b8cdd0>] (schedule) from [<80b91024>]
(schedule_timeout+0x228/0x444)
[  243.693270] [<80b91024>] (schedule_timeout) from [<80886738>]
(dma_fence_default_wait+0x2b4/0x2d8)
[  243.693276] [<80886738>] (dma_fence_default_wait) from [<80885d60>]
(dma_fence_wait_timeout+0x40/0x150)
[  243.693284] [<80885d60>] (dma_fence_wait_timeout) from [<80887b1c>]
(reservation_object_wait_timeout_rcu+0xfc/0x34c)
[  243.693509] [<80887b1c>] (reservation_object_wait_timeout_rcu) from
[<7f331988>] (amdgpu_dm_do_flip+0xec/0x36c [amdgpu])
[  243.693789] [<7f331988>] (amdgpu_dm_do_flip [amdgpu]) from
[<7f33309c>] (amdgpu_dm_atomic_commit_tail+0xbfc/0xe58 [amdgpu])
...

I will try to reproduce this on x86 with a similar software stack...
and the apitrace traces I got.
What do you think, does this makes sense? Do you have further
suggestions that may help pin down the problem?

Another strange thing... the traces that were consistently causing
hangs yesterday, today are having a bit more difficulty causing them,
but if I play the video with kodi it hangs easily again. Both kodi and
glretrace always hang with similar kernel backtraces, like the one
above.
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: Deadlocks with multiple applications on AMD RX 460 and RX 550 - Update 2

2018-01-03 Thread Luís Mendes
Hi Christian, David,

David, replying to your question... The issue is indeed reproducible
on x86, I just did it with kodi and the same VP9 video. So it is not
arm specific.

Regards,
Luís

On Wed, Jan 3, 2018 at 11:02 AM, Luís Mendes  wrote:
> Hi Christian,
>
> Replies follow in between.
>
> Regards,
> Luís
>
> On Wed, Jan 3, 2018 at 9:37 AM, Christian König
>  wrote:
>> Hi Luis,
>>
>> In general please add information like /proc/iomem and dmesg as attachment
>> and not mangled inside the mail.
>
> Ok, I'll take that into account next time. Sorry for the inconvenience.
>
>>
>> The good news is that your ARM board at least has a memory layout which
>> should work in theory. So at least one problem rules out.
>
> Ok, nice.
>
>>
>> I don't think that apitrace would be much helpful in this case as long as no
>> developer has access to one of those ARM boards. But it is interesting that
>> the apitrace reliable reproduces the issue. This means that it isn't
>> something random, but rather a specific timing of things.
>
> I am afraid, I currently don't have boards that I can send yet. I am
> developing one, but it will still take some time, before I have one
> ready.
>
> I've checked the apitrace and there is a common call
> glXSwapBuffers(dpy=0x1389f00, drawable=52428803) that I believe will
> trigger the page flip. I suspect there is a race condition with
> glXSwapBuffers in mesa or amdgpu, that corrupts some of the data sent
> to the GPU causing an hang.
> What I believe it seems to be the case is that the GPU lock up only
> happens when doing a page flip, since the kernel locks with:
> [  243.693200] kworker/u4:3D089  2 0x
> [  243.693232] Workqueue: events_unbound commit_work [drm_kms_helper]
> [  243.693251] [<80b8c6d4>] (__schedule) from [<80b8cdd0>] 
> (schedule+0x4c/0xac)
> [  243.693259] [<80b8cdd0>] (schedule) from [<80b91024>]
> (schedule_timeout+0x228/0x444)
> [  243.693270] [<80b91024>] (schedule_timeout) from [<80886738>]
> (dma_fence_default_wait+0x2b4/0x2d8)
> [  243.693276] [<80886738>] (dma_fence_default_wait) from [<80885d60>]
> (dma_fence_wait_timeout+0x40/0x150)
> [  243.693284] [<80885d60>] (dma_fence_wait_timeout) from [<80887b1c>]
> (reservation_object_wait_timeout_rcu+0xfc/0x34c)
> [  243.693509] [<80887b1c>] (reservation_object_wait_timeout_rcu) from
> [<7f331988>] (amdgpu_dm_do_flip+0xec/0x36c [amdgpu])
> [  243.693789] [<7f331988>] (amdgpu_dm_do_flip [amdgpu]) from
> [<7f33309c>] (amdgpu_dm_atomic_commit_tail+0xbfc/0xe58 [amdgpu])
> ...
>
> I will try to reproduce this on x86 with a similar software stack...
> and the apitrace traces I got.
> What do you think, does this makes sense? Do you have further
> suggestions that may help pin down the problem?
>
> Another strange thing... the traces that were consistently causing
> hangs yesterday, today are having a bit more difficulty causing them,
> but if I play the video with kodi it hangs easily again. Both kodi and
> glretarce always hangs with similar kernel backtraces, like the one
> above.
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/4] drm/amdgpu: minor optimize VM moved handling v2

2018-01-03 Thread Zhou, David(ChunMing)
 +else if (reservation_object_trylock(resv))
 +clear = false;

this will affect BOs in the BO list, won't it?


发自坚果 Pro

Koenig, Christian  于 2018年1月3日 下午6:47写道:

Am 03.01.2018 um 11:43 schrieb Chunming Zhou:
>
>
> On 2018年01月03日 17:25, Christian König wrote:
>> Am 03.01.2018 um 09:10 schrieb Zhou, David(ChunMing):
>>>
>>> On 2018年01月02日 22:47, Christian König wrote:
 Try to lock moved BOs if it's successful we can update the
 PTEs directly to the new location.

 v2: rebase

 Signed-off-by: Christian König 
 ---
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++-
1 file changed, 14 insertions(+), 1 deletion(-)

 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
 b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
 index 3632c69f1814..c1c5ccdee783 100644
 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
 @@ -1697,18 +1697,31 @@ int amdgpu_vm_handle_moved(struct
 amdgpu_device *adev,
spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
struct amdgpu_bo_va *bo_va;
 +struct reservation_object *resv;
   bo_va = list_first_entry(&vm->moved,
struct amdgpu_bo_va, base.vm_status);
spin_unlock(&vm->status_lock);
+resv = bo_va->base.bo->tbo.resv;
 +
/* Per VM BOs never need to bo cleared in the page
 tables */
>>> This reminders us Per-VM-BOs need to cleared as well after we allow to
>>> evict/swap out per-vm-bos.
>>
>> Actually they don't. The page tables only need to be valid during CS.
>>
>> So what happens is that the per VM-BOs are validated in right before
>> we call amdgpu_vm_handle_moved().
> Yeah, agree it for per-vm-bo situation after I checked all adding
> moved list cases:
> 1. validate pt bos
> 2. bo invalidate
> 3. insert_map for per-vm-bo
> item #1 and #3 both are per-vm-bo, they are already validated before
> handle_moved().
>
> For item #2, there are three places to call it:
> a. amdgpu_bo_vm_update_pte in CS for amdgpu_vm_debug
> b. amdgpu_gem_op_ioctl, but it is for evicted list, nothing with moved
> list.
> c. amdgpu_bo_move_notify when bo validate.
>
> For c case, your optimization is valid, we don't need clear for
> validate bo.
> But for a case, yours will break amdgpu_vm_debug functionality.
>
> Right?

Interesting point, but no that should be handled as well.

The vm_debug handling is only for the BOs on the BO-list. E.g. per VM
BOs are never handled here.

Regards,
Christian.

>
> Regards,
> David Zhou
>
>>
>> Regards,
>> Christian.
>>
>>>
>>> Regards,
>>> David Zhou
 -clear = bo_va->base.bo->tbo.resv !=
 vm->root.base.bo->tbo.resv;
 +if (resv == vm->root.base.bo->tbo.resv)
 +clear = false;
 +/* Try to reserve the BO to avoid clearing its ptes */
 +else if (reservation_object_trylock(resv))
 +clear = false;
 +/* Somebody else is using the BO right now */
 +else
 +clear = true;
   r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
return r;
+if (!clear && resv != vm->root.base.bo->tbo.resv)
 +reservation_object_unlock(resv);
 +
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
>>
>

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: Deadlocks with multiple applications on AMD RX 460 and RX 550 - Update 2

2018-01-03 Thread Christian König
In this case please open a bug report on fdo and describe exactly how to 
reproduce it.


Marek should be able to take a look then.

Thanks,
Christian.

Am 03.01.2018 um 12:56 schrieb Luís Mendes:

Hi Christian, David,

David, replying to your question... The issue is indeed reproducible
on x86, I just did it with kodi and the same VP9 video. So it is not
arm specific.

Regards,
Luís

On Wed, Jan 3, 2018 at 11:02 AM, Luís Mendes  wrote:

Hi Christian,

Replies follow in between.

Regards,
Luís

On Wed, Jan 3, 2018 at 9:37 AM, Christian König
 wrote:

Hi Luis,

In general please add information like /proc/iomem and dmesg as attachment
and not mangled inside the mail.

Ok, I'll take that into account next time. Sorry for the inconvenience.


The good news is that your ARM board at least has a memory layout which
should work in theory. So at least one problem rules out.

Ok, nice.


I don't think that apitrace would be much helpful in this case as long as no
developer has access to one of those ARM boards. But it is interesting that
the apitrace reliable reproduces the issue. This means that it isn't
something random, but rather a specific timing of things.

I am afraid, I currently don't have boards that I can send yet. I am
developing one, but it will still take some time, before I have one
ready.

I've checked the apitrace and there is a common call
glXSwapBuffers(dpy=0x1389f00, drawable=52428803) that I believe will
trigger the page flip. I suspect there is a race condition with
glXSwapBuffers in mesa or amdgpu, that corrupts some of the data sent
to the GPU causing a hang.
What I believe it seems to be the case is that the GPU lock up only
happens when doing a page flip, since the kernel locks with:
[  243.693200] kworker/u4:3D089  2 0x
[  243.693232] Workqueue: events_unbound commit_work [drm_kms_helper]
[  243.693251] [<80b8c6d4>] (__schedule) from [<80b8cdd0>] (schedule+0x4c/0xac)
[  243.693259] [<80b8cdd0>] (schedule) from [<80b91024>]
(schedule_timeout+0x228/0x444)
[  243.693270] [<80b91024>] (schedule_timeout) from [<80886738>]
(dma_fence_default_wait+0x2b4/0x2d8)
[  243.693276] [<80886738>] (dma_fence_default_wait) from [<80885d60>]
(dma_fence_wait_timeout+0x40/0x150)
[  243.693284] [<80885d60>] (dma_fence_wait_timeout) from [<80887b1c>]
(reservation_object_wait_timeout_rcu+0xfc/0x34c)
[  243.693509] [<80887b1c>] (reservation_object_wait_timeout_rcu) from
[<7f331988>] (amdgpu_dm_do_flip+0xec/0x36c [amdgpu])
[  243.693789] [<7f331988>] (amdgpu_dm_do_flip [amdgpu]) from
[<7f33309c>] (amdgpu_dm_atomic_commit_tail+0xbfc/0xe58 [amdgpu])
...

I will try to reproduce this on x86 with a similar software stack...
and the apitrace traces I got.
What do you think, does this makes sense? Do you have further
suggestions that may help pin down the problem?

Another strange thing... the traces that were consistently causing
hangs yesterday, today are having a bit more difficulty causing them,
but if I play the video with kodi it hangs easily again. Both kodi and
glretrace always hang with similar kernel backtraces, like the one
above.

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/4] drm/amdgpu: minor optimize VM moved handling v2

2018-01-03 Thread Christian König

Am 03.01.2018 um 12:57 schrieb Zhou, David(ChunMing):

 +else if (reservation_object_trylock(resv))
 +clear = false;

this will effect bo in bo list,wont it?


Ah, now I get what you mean.

Yeah, that should be fixed or otherwise it will completely disable 
vm_debug support.


Going to send a patch,
Christian.



发自坚果 Pro

Koenig, Christian  于 2018年1月3日 
下午6:47写道:


Am 03.01.2018 um 11:43 schrieb Chunming Zhou:
>
>
> On 2018年01月03日 17:25, Christian König wrote:
>> Am 03.01.2018 um 09:10 schrieb Zhou, David(ChunMing):
>>>
>>> On 2018年01月02日 22:47, Christian König wrote:
 Try to lock moved BOs if it's successful we can update the
 PTEs directly to the new location.

 v2: rebase

 Signed-off-by: Christian König 
 ---
    drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++-
    1 file changed, 14 insertions(+), 1 deletion(-)

 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
 b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
 index 3632c69f1814..c1c5ccdee783 100644
 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
 @@ -1697,18 +1697,31 @@ int amdgpu_vm_handle_moved(struct
 amdgpu_device *adev,
    spin_lock(&vm->status_lock);
    while (!list_empty(&vm->moved)) {
    struct amdgpu_bo_va *bo_va;
 +    struct reservation_object *resv;
       bo_va = list_first_entry(&vm->moved,
    struct amdgpu_bo_va, base.vm_status);
 spin_unlock(&vm->status_lock);
    +    resv = bo_va->base.bo->tbo.resv;
 +
    /* Per VM BOs never need to bo cleared in the page
 tables */
>>> This reminders us Per-VM-BOs need to cleared as well after we allow to
>>> evict/swap out per-vm-bos.
>>
>> Actually they don't. The page tables only need to be valid during CS.
>>
>> So what happens is that the per VM-BOs are validated in right before
>> we call amdgpu_vm_handle_moved().
> Yeah, agree it for per-vm-bo situation after I checked all adding
> moved list cases:
> 1. validate pt bos
> 2. bo invalidate
> 3. insert_map for per-vm-bo
> item #1 and #3 both are per-vm-bo, they are already validated before
> handle_moved().
>
> For item #2, there are three places to call it:
> a. amdgpu_bo_vm_update_pte in CS for amdgpu_vm_debug
> b. amdgpu_gem_op_ioctl, but it is for evicted list, nothing with moved
> list.
> c. amdgpu_bo_move_notify when bo validate.
>
> For c case, your optimization is valid, we don't need clear for
> validate bo.
> But for a case, yours will break amdgpu_vm_debug functionality.
>
> Right?

Interesting point, but no that should be handled as well.

The vm_debug handling is only for the BOs on the BO-list. E.g. per VM
BOs are never handled here.

Regards,
Christian.

>
> Regards,
> David Zhou
>
>>
>> Regards,
>> Christian.
>>
>>>
>>> Regards,
>>> David Zhou
 -    clear = bo_va->base.bo->tbo.resv !=
 vm->root.base.bo->tbo.resv;
 +    if (resv == vm->root.base.bo->tbo.resv)
 +    clear = false;
 +    /* Try to reserve the BO to avoid clearing its ptes */
 +    else if (reservation_object_trylock(resv))
 +    clear = false;
 +    /* Somebody else is using the BO right now */
 +    else
 +    clear = true;
       r = amdgpu_vm_bo_update(adev, bo_va, clear);
    if (r)
    return r;
    +    if (!clear && resv != vm->root.base.bo->tbo.resv)
 + reservation_object_unlock(resv);
 +
 spin_lock(&vm->status_lock);
    }
 spin_unlock(&vm->status_lock);
>>
>



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: optimize moved handling only when vm_debug is inactive

2018-01-03 Thread Christian König
Otherwise we would completely circumvent that debugging feature.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 81505870eebc..cd1752b6afa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1685,7 +1685,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
if (resv == vm->root.base.bo->tbo.resv)
clear = false;
/* Try to reserve the BO to avoid clearing its ptes */
-   else if (reservation_object_trylock(resv))
+   else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
clear = false;
/* Somebody else is using the BO right now */
else
-- 
2.11.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/powerplay: fix memory leakage when reload

2018-01-03 Thread Deucher, Alexander
Did you see my reply yesterday?  I reviewed it.  I also think we need to fix up 
cz, rv, and vg10.


From: Tao, Yintian
Sent: Tuesday, January 2, 2018 9:22:23 PM
To: Tao, Yintian; amd-gfx@lists.freedesktop.org; Deucher, Alexander
Subject: RE: [PATCH] drm/amd/powerplay: fix memory leakage when reload

Add Alex

-Original Message-
From: Yintian Tao [mailto:yt...@amd.com]
Sent: Monday, January 01, 2018 11:16 AM
To: amd-gfx@lists.freedesktop.org
Cc: Tao, Yintian 
Subject: [PATCH] drm/amd/powerplay: fix memory leakage when reload

add smu_free_memory when smu fini to prevent memory leakage

Change-Id: Id9103d8b54869b63f22a9af53d9fbc3b7a221191
Signed-off-by: Yintian Tao 
---
 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c 
b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index c49a6f2..925217e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -607,6 +607,12 @@ int smu7_init(struct pp_smumgr *smumgr)

 int smu7_smu_fini(struct pp_smumgr *smumgr)  {
+   struct smu7_smumgr *smu_data = (struct smu7_smumgr
+*)(smumgr->backend);
+
+   smu_free_memory(smumgr->device, smu_data->header_buffer.handle);
+   if (!cgs_is_virtualization_enabled(smumgr->device))
+   smu_free_memory(smumgr->device, smu_data->smu_buffer.handle);
+
 if (smumgr->backend) {
 kfree(smumgr->backend);
 smumgr->backend = NULL;
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/pp: Supply Max DGPU clock for DC validation

2018-01-03 Thread Harry Wentland
On 2018-01-02 10:20 PM, Rex Zhu wrote:
> This patch can fix MultiGPU-Display blank
> out with 1 IGPU-4k display and 2 DGPU-two 4K
> displays.
> 
> Change-Id: I41208feb6b275d9e8b45e9ef129e19c9739107a4
> Signed-off-by: Rex Zhu 
> ---
>  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 +++--
>  1 file changed, 7 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c 
> b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> index fa9d161..977fcdf 100644
> --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> @@ -1425,9 +1425,14 @@ static int pp_get_display_mode_validation_clocks(void 
> *handle,
>  
>   if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 
> PHM_PlatformCaps_DynamicPatchPowerState))
>   ret = phm_get_max_high_clocks(hwmgr, clocks);
> -
>   mutex_unlock(&pp_handle->pp_lock);
> - return ret;
> +
> + if (ret) {

Does phm_get_max_high_clocks fail because get_max_high_clocks is not 
implemented for dGPU?

> + clocks->memory_max_clock = pp_dpm_get_mclk(handle, false);
> + clocks->engine_max_clock = pp_dpm_get_sclk(handle, false);

Would it make more sense to keep this inside phm_get_max_high_clocks as 
fallback when get_max_high_clocks is not implemented? Should we also print a 
warning in this case?

Harry

> + }
> +
> + return 0;
>  }
>  
>  const struct amd_pm_funcs pp_dpm_funcs = {
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4.15] drm/amd/display: call set csc_default if enable adjustment is false

2018-01-03 Thread Harry Wentland
On 2017-12-31 10:17 AM, Alex Deucher wrote:
> On Fri, Dec 29, 2017 at 6:11 AM, Daniel Drake  wrote:
>> From: Yue Hin Lau 
>>
>> Signed-off-by: Yue Hin Lau 
>> Reviewed-by: Eric Bernstein 
>> Acked-by: Harry Wentland 
>> Signed-off-by: Alex Deucher 
>> [dr...@endlessm.com: backport to 4.15]
>> Signed-off-by: Daniel Drake 
>> ---
>>  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h  | 2 +-
>>  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c   | 6 ++
>>  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 ++
>>  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h   | 2 +-
>>  4 files changed, 6 insertions(+), 6 deletions(-)
>>
>> Testing Acer Aspire TC-380 engineering sample (Raven Ridge), the display
>> comes up with an excessively green tint. This patch (from 
>> amd-staging-drm-next)
>> solves the issue. Can it be included in Linux 4.15?
> 
> Looks ok to me.  Unless Harry or Leo have any objections, I'll add it
> to my queue.
> 

No objections. Thanks, Daniel.

Harry

> Thanks!
> 
> Alex
> 
> 
>>
>> diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h 
>> b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
>> index a9782b1aba47..34daf895f848 100644
>> --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
>> +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
>> @@ -1360,7 +1360,7 @@ void dpp1_cm_set_output_csc_adjustment(
>>
>>  void dpp1_cm_set_output_csc_default(
>> struct dpp *dpp_base,
>> -   const struct default_adjustment *default_adjust);
>> +   enum dc_color_space colorspace);
>>
>>  void dpp1_cm_set_gamut_remap(
>> struct dpp *dpp,
>> diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c 
>> b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
>> index 40627c244bf5..ed1216b53465 100644
>> --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
>> +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
>> @@ -225,14 +225,13 @@ void dpp1_cm_set_gamut_remap(
>>
>>  void dpp1_cm_set_output_csc_default(
>> struct dpp *dpp_base,
>> -   const struct default_adjustment *default_adjust)
>> +   enum dc_color_space colorspace)
>>  {
>>
>> struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
>> uint32_t ocsc_mode = 0;
>>
>> -   if (default_adjust != NULL) {
>> -   switch (default_adjust->out_color_space) {
>> +   switch (colorspace) {
>> case COLOR_SPACE_SRGB:
>> case COLOR_SPACE_2020_RGB_FULLRANGE:
>> ocsc_mode = 0;
>> @@ -253,7 +252,6 @@ void dpp1_cm_set_output_csc_default(
>> case COLOR_SPACE_UNKNOWN:
>> default:
>> break;
>> -   }
>> }
>>
>> REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
>> diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
>> b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
>> index 961ad5c3b454..05dc01e54531 100644
>> --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
>> +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
>> @@ -2097,6 +2097,8 @@ static void program_csc_matrix(struct pipe_ctx 
>> *pipe_ctx,
>> tbl_entry.color_space = color_space;
>> //tbl_entry.regval = matrix;
>> 
>> pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp,
>>  &tbl_entry);
>> +   } else {
>> +   
>> pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, 
>> colorspace);
>> }
>>  }
>>  static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
>> diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h 
>> b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
>> index 83a68460edcd..9420dfb94d39 100644
>> --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
>> +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
>> @@ -64,7 +64,7 @@ struct dpp_funcs {
>>
>> void (*opp_set_csc_default)(
>> struct dpp *dpp,
>> -   const struct default_adjustment *default_adjust);
>> +   enum dc_color_space colorspace);
>>
>> void (*opp_set_csc_adjustment)(
>> struct dpp *dpp,
>> --
>> 2.14.1
>>
>> ___
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: PCIe3 atomics requirement for amdkfd

2018-01-03 Thread Tom Stellard
On 12/23/2017 07:40 AM, Felix Kühling wrote:
> As I understand it, it would require changes in the ROCr Runtime and in
> the firmware (MEC microcode). It also changes the programming model, so
> it may affect certain applications or higher level language runtimes
> that rely on atomic operations.
> 

How does the MEC microcode know that it is running a ROCm workload as opposed
to a graphics workload that doesn't require PCIe3 atomics.  Is there a specific
configuration bit that is set to indicate the ROCm programming model is needed?

-Tom

> Regards,
>   Felix
> 
> 
> Am 19.12.2017 um 16:04 schrieb Tom Stellard:
>> Hi,
>>
>> How hard of a requirement is PCIe3 atomics for dGPUs with the amdkfd
>> kernel driver?  Is it possible to make modifications to the runtime/kernel
>> driver to drop this requirement?
>>
>> -Tom
>> ___
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> 
> 

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [amd-staging-drm-next] regression - *ERROR* Don't have enable_spread_spectrum_on_ppll for v4

2018-01-03 Thread Harry Wentland
On 2017-12-27 04:04 AM, Michel Dänzer wrote:
> On 2017-12-27 05:43 AM, Dieter Nützel wrote:
>> Hello AMD team,
>>
>> I got this since latest 'amd-staging-drm-next' git update
>> (#b956c586e58a) during boot with Polaris RX580 DC on:
>>
>> [    3.586342] [drm:dal_bios_parser_init_cmd_tbl [amdgpu]] *ERROR* Don't
>> have enable_spread_spectrum_on_ppll for v4
>> [    3.586410] [drm:dal_bios_parser_init_cmd_tbl [amdgpu]] *ERROR* Don't
>> have program_clock for v7
>>
>> Latest GOOD commit was #b341a19e8039 (drm/radeon: Remove
>> KFD_CIK_SDMA_QUEUE_OFFSET).
>>
>> I'll bisect if I have some time.
>> Maybe someone send a hint to the right commit.
> 
> I don't think bisecting is useful, it'll most likely just identify
> commit 040dda2e1f52 ("drm/amd/display: Error print when ATOM BIOS
> implementation is missing") which started printing these messages for
> missing ATOM BIOS implementations.
> 
> I'm not sure, but I suspect these messages are harmless, given that
> things are otherwise presumably still working as well as they were before.

These are harmless. I wrongfully assumed that printing errors here should be 
fine on all ASICs as I assumed all relevant functions were implemented. 
Apparently they're not, proving me wrong and proving the usefulness of some 
sort of print here. I'll send a patch to demote the prints to debug level.

Harry

> 
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 00/13] remove_conflicting_framebuffers() cleanup

2018-01-03 Thread Bartlomiej Zolnierkiewicz
On Monday, November 27, 2017 11:30:44 AM Daniel Vetter wrote:
> On Fri, Nov 24, 2017 at 06:53:25PM +0100, Michał Mirosław wrote:
> > This series cleans up duplicated code for replacing firmware FB
> > driver with proper DRI driver and adds handover support to
> > Tegra driver.

Please Cc: me on and linux-fbdev ML on fbdev related patches
(I was Cc:-ed only on the cover letter and patch #10, linux-fbdev
was not Cc:-ed at all).

> > The last patch is here because it uses new semantics of
> > remove_conflicting_framebuffers() from this series. This
> > can be considered independently, though.
> 
> Except for that patches I've commented on:
> 
> Acked-by: Daniel Vetter 
> 
> Since this is for tegra and Thierry has drm-misc commit rights, it's
> probably simplest when Thierry pushes this all to drm-misc once driver
> maintainers had a chance to look at it. Also needs and ack from Bart for
> the fbdev sides.

For fbdev changes:

Acked-by: Bartlomiej Zolnierkiewicz 

Best regards,
--
Bartlomiej Zolnierkiewicz
Samsung R&D Institute Poland
Samsung Electronics

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/display/dc: Demote error print to debug print when ATOM impl missing

2018-01-03 Thread Harry Wentland
I assumed wrongfully that all relevant functions should be implemented.
Apparently this isn't the case. Demote the print to debug level for now.

Signed-off-by: Harry Wentland 
---
 .../gpu/drm/amd/display/dc/bios/command_table.c| 22 +++---
 .../gpu/drm/amd/display/dc/bios/command_table2.c   | 16 
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c 
b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 1aefed8cf98b..4b5fdd577848 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -387,7 +387,7 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
break;
default:
-   dm_error("Don't have transmitter_control for v%d\n", crev);
+   dm_output_to_console("Don't have transmitter_control for 
v%d\n", crev);
bp->cmd_tbl.transmitter_control = NULL;
break;
}
@@ -911,7 +911,7 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
-   dm_error("Don't have set_pixel_clock for v%d\n",
+   dm_output_to_console("Don't have set_pixel_clock for v%d\n",
 BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
bp->cmd_tbl.set_pixel_clock = NULL;
break;
@@ -1230,7 +1230,7 @@ static void init_enable_spread_spectrum_on_ppll(struct 
bios_parser *bp)
enable_spread_spectrum_on_ppll_v3;
break;
default:
-   dm_error("Don't have enable_spread_spectrum_on_ppll for v%d\n",
+   dm_output_to_console("Don't have enable_spread_spectrum_on_ppll 
for v%d\n",
 
BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL));
bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
break;
@@ -1427,7 +1427,7 @@ static void init_adjust_display_pll(struct bios_parser 
*bp)
bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
break;
default:
-   dm_error("Don't have adjust_display_pll for v%d\n",
+   dm_output_to_console("Don't have adjust_display_pll for v%d\n",
 BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll));
bp->cmd_tbl.adjust_display_pll = NULL;
break;
@@ -1702,7 +1702,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
-   dm_error("Don't have set_crtc_timing for dtd v%d\n",
+   dm_output_to_console("Don't have set_crtc_timing for 
dtd v%d\n",
 dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
@@ -1713,7 +1713,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
break;
default:
-   dm_error("Don't have set_crtc_timing for v%d\n",
+   dm_output_to_console("Don't have set_crtc_timing for 
v%d\n",
 BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing));
bp->cmd_tbl.set_crtc_timing = NULL;
break;
@@ -1901,7 +1901,7 @@ static void init_select_crtc_source(struct bios_parser 
*bp)
bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
break;
default:
-   dm_error("Don't select_crtc_source enable_crtc for v%d\n",
+   dm_output_to_console("Don't select_crtc_source enable_crtc for 
v%d\n",
 BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source));
bp->cmd_tbl.select_crtc_source = NULL;
break;
@@ -2010,7 +2010,7 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
-   dm_error("Don't have enable_crtc for v%d\n",
+   dm_output_to_console("Don't have enable_crtc for v%d\n",
 BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC));
bp->cmd_tbl.enable_crtc = NULL;
break;
@@ -2118,7 +2118,7 @@ static void init_program_clock(struct bios_parser *bp)
bp->cmd_tbl.program_clock = program_clock_v6;
break;
default:
-   dm_error("Don't have program_clock for v%d\n",
+   dm_output_to_console("Don't have program_clock for v%d\n",
 BIOS_CMD_TABLE_PARA_

Re: [PATCH] drm/amd/amdgpu: set gtt size according to system memory size only

2018-01-03 Thread Michel Dänzer
On 2018-01-02 10:57 AM, He, Roger wrote:
> Original Message-
> From: Michel Dänzer [mailto:mic...@daenzer.net] 
> Sent: Wednesday, December 27, 2017 4:58 PM
> To: He, Roger ; Koenig, Christian 
> ; Grodzovsky, Andrey 
> Cc: amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH] drm/amd/amdgpu: set gtt size according to system memory 
> size only
> 
> On 2017-12-25 09:45 AM, He, Roger wrote:
>>
>> Could you tell me how to duplicate this issue?  Maybe now I can look into it.
> 
>piglit run -x glx-multithread-texture --process-isolation false gpu 
> results/gpu.
> 
> 
> on my side, not work with above command:
> 
> root@jenkins-MS-7984:/home/jenkins/roger/piglit.0902.release# ./piglit run -x 
> glx-multithread-texture --process-isolation false gpu results/gpu
> usage: piglit [-h] [-f CONFIG_FILE] [-n ] [-d] [-t ]
>   [-x ] [-b {junit,json}] [-c | -1]
>   [-p {glx,x11_egl,wayland,gbm,mixed_glx_egl}] [--valgrind]
>   [--dmesg] [--abort-on-monitored-error] [-s]
>   [--junit_suffix JUNIT_SUFFIX] [--junit-subtests]
>   [-v | -l {quiet,verbose,dummy,http}] [--test-list TEST_LIST]
>   [-o] [--deqp-mustpass-list]
>[ ...] 
> piglit: error: unrecognized arguments: --process-isolation
> 
> is it because my piglit is too old?

Looks like it. Normally, one should always use current Git master of
piglit. If you can't or don't want to do that, you can try with just

 piglit run -x glx-multithread-texture gpu results/gpu.

but it may be less likely to reproduce the issue.


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: PCIe3 atomics requirement for amdkfd

2018-01-03 Thread Liu, Shaoyun
I think currently atomic  Ops are only used in AQL package which is only 
available for ROCm , graphics workload will not use AQL package. 

Regards
Shaoyun.liu

-Original Message-
From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Tom 
Stellard
Sent: Wednesday, January 03, 2018 9:57 AM
To: Felix Kühling; amd-gfx@lists.freedesktop.org
Cc: Kuehling, Felix
Subject: Re: PCIe3 atomics requirement for amdkfd

On 12/23/2017 07:40 AM, Felix Kühling wrote:
> As I understand it, it would require changes in the ROCr Runtime and 
> in the firmware (MEC microcode). It also changes the programming 
> model, so it may affect certain applications or higher level language 
> runtimes that rely on atomic operations.
> 

How does the MEC microcode know that it is running a ROCm workload as opposed 
to a graphics workload that doesn't require PCIe3 atomics.  Is there a specific 
configuration bit that is set to indicate the ROCm programming model is needed?

-Tom

> Regards,
>   Felix
> 
> 
> Am 19.12.2017 um 16:04 schrieb Tom Stellard:
>> Hi,
>>
>> How hard of a requirement is PCIe3 atomics for dGPUs with the amdkfd 
>> kernel driver?  Is it possible to make modifications to the 
>> runtime/kernel driver to drop this requirement?
>>
>> -Tom
>> ___
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> 
> 

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/display/dc: Demote error print to debug print when ATOM impl missing

2018-01-03 Thread Alex Deucher
On Wed, Jan 3, 2018 at 10:18 AM, Harry Wentland  wrote:
> I assumed wrongfully that all relevant functions should be implemented.
> Apparently this isn't the case. Demote the print to debug level for now.
>
> Signed-off-by: Harry Wentland 

Acked-by: Alex Deucher 

> ---
>  .../gpu/drm/amd/display/dc/bios/command_table.c| 22 
> +++---
>  .../gpu/drm/amd/display/dc/bios/command_table2.c   | 16 
>  2 files changed, 19 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c 
> b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
> index 1aefed8cf98b..4b5fdd577848 100644
> --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
> +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
> @@ -387,7 +387,7 @@ static void init_transmitter_control(struct bios_parser 
> *bp)
> bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
> break;
> default:
> -   dm_error("Don't have transmitter_control for v%d\n", crev);
> +   dm_output_to_console("Don't have transmitter_control for 
> v%d\n", crev);
> bp->cmd_tbl.transmitter_control = NULL;
> break;
> }
> @@ -911,7 +911,7 @@ static void init_set_pixel_clock(struct bios_parser *bp)
> bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
> break;
> default:
> -   dm_error("Don't have set_pixel_clock for v%d\n",
> +   dm_output_to_console("Don't have set_pixel_clock for v%d\n",
>  BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
> bp->cmd_tbl.set_pixel_clock = NULL;
> break;
> @@ -1230,7 +1230,7 @@ static void init_enable_spread_spectrum_on_ppll(struct 
> bios_parser *bp)
> enable_spread_spectrum_on_ppll_v3;
> break;
> default:
> -   dm_error("Don't have enable_spread_spectrum_on_ppll for 
> v%d\n",
> +   dm_output_to_console("Don't have 
> enable_spread_spectrum_on_ppll for v%d\n",
>  
> BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL));
> bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
> break;
> @@ -1427,7 +1427,7 @@ static void init_adjust_display_pll(struct bios_parser 
> *bp)
> bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
> break;
> default:
> -   dm_error("Don't have adjust_display_pll for v%d\n",
> +   dm_output_to_console("Don't have adjust_display_pll for 
> v%d\n",
>  BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll));
> bp->cmd_tbl.adjust_display_pll = NULL;
> break;
> @@ -1702,7 +1702,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
> set_crtc_using_dtd_timing_v3;
> break;
> default:
> -   dm_error("Don't have set_crtc_timing for dtd v%d\n",
> +   dm_output_to_console("Don't have set_crtc_timing for 
> dtd v%d\n",
>  dtd_version);
> bp->cmd_tbl.set_crtc_timing = NULL;
> break;
> @@ -1713,7 +1713,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
> bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
> break;
> default:
> -   dm_error("Don't have set_crtc_timing for v%d\n",
> +   dm_output_to_console("Don't have set_crtc_timing for 
> v%d\n",
>  
> BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing));
> bp->cmd_tbl.set_crtc_timing = NULL;
> break;
> @@ -1901,7 +1901,7 @@ static void init_select_crtc_source(struct bios_parser 
> *bp)
> bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
> break;
> default:
> -   dm_error("Don't select_crtc_source enable_crtc for v%d\n",
> +   dm_output_to_console("Don't select_crtc_source enable_crtc 
> for v%d\n",
>  BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source));
> bp->cmd_tbl.select_crtc_source = NULL;
> break;
> @@ -2010,7 +2010,7 @@ static void init_enable_crtc(struct bios_parser *bp)
> bp->cmd_tbl.enable_crtc = enable_crtc_v1;
> break;
> default:
> -   dm_error("Don't have enable_crtc for v%d\n",
> +   dm_output_to_console("Don't have enable_crtc for v%d\n",
>  BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC));
> bp->cmd_tbl.enable_crtc = NULL;
> break;
> @@ -2118,7 +2118,7 @@ static void init_program_clock(struct bios_parser *bp

RE: PCIe3 atomics requirement for amdkfd

2018-01-03 Thread Bridgman, John
Agreed - MEC microcode uses atomics when the queue type is set to AQL (rather 
than PM4).

>-Original Message-
>From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of
>Liu, Shaoyun
>Sent: Wednesday, January 03, 2018 11:24 AM
>To: tstel...@redhat.com; Felix Kühling; amd-gfx@lists.freedesktop.org
>Cc: Kuehling, Felix
>Subject: RE: PCIe3 atomics requirement for amdkfd
>
>I think currently atomic  Ops are only used in AQL package which is only
>available for ROCm , graphics workload will not use AQL package.
>
>Regards
>Shaoyun.liu
>
>-Original Message-
>From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of
>Tom Stellard
>Sent: Wednesday, January 03, 2018 9:57 AM
>To: Felix Kühling; amd-gfx@lists.freedesktop.org
>Cc: Kuehling, Felix
>Subject: Re: PCIe3 atomics requirement for amdkfd
>
>On 12/23/2017 07:40 AM, Felix Kühling wrote:
>> As I understand it, it would require changes in the ROCr Runtime and
>> in the firmware (MEC microcode). It also changes the programming
>> model, so it may affect certain applications or higher level language
>> runtimes that rely on atomic operations.
>>
>
>How does the MEC microcode know that it is running a ROCm workload as
>opposed to a graphics workload that doesn't require PCIe3 atomics.  Is there a
>specific configuration bit that is set to indicate the ROCm programming model
>is needed?
>
>-Tom
>
>> Regards,
>>   Felix
>>
>>
>> Am 19.12.2017 um 16:04 schrieb Tom Stellard:
>>> Hi,
>>>
>>> How hard of a requirement is PCIe3 atomics for dGPUs with the amdkfd
>>> kernel driver?  Is it possible to make modifications to the
>>> runtime/kernel driver to drop this requirement?
>>>
>>> -Tom
>>> ___
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>
>>
>
>___
>amd-gfx mailing list
>amd-gfx@lists.freedesktop.org
>https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>___
>amd-gfx mailing list
>amd-gfx@lists.freedesktop.org
>https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: Deadlocks with multiple applications on AMD RX 460 and RX 550 - Update 2

2018-01-03 Thread Michel Dänzer
On 2018-01-03 12:02 PM, Luís Mendes wrote:
> 
> What I believe it seems to be the case is that the GPU lock up only
> happens when doing a page flip, since the kernel locks with:
> [  243.693200] kworker/u4:3D089  2 0x
> [  243.693232] Workqueue: events_unbound commit_work [drm_kms_helper]
> [  243.693251] [<80b8c6d4>] (__schedule) from [<80b8cdd0>] 
> (schedule+0x4c/0xac)
> [  243.693259] [<80b8cdd0>] (schedule) from [<80b91024>]
> (schedule_timeout+0x228/0x444)
> [  243.693270] [<80b91024>] (schedule_timeout) from [<80886738>]
> (dma_fence_default_wait+0x2b4/0x2d8)
> [  243.693276] [<80886738>] (dma_fence_default_wait) from [<80885d60>]
> (dma_fence_wait_timeout+0x40/0x150)
> [  243.693284] [<80885d60>] (dma_fence_wait_timeout) from [<80887b1c>]
> (reservation_object_wait_timeout_rcu+0xfc/0x34c)
> [  243.693509] [<80887b1c>] (reservation_object_wait_timeout_rcu) from
> [<7f331988>] (amdgpu_dm_do_flip+0xec/0x36c [amdgpu])
> [  243.693789] [<7f331988>] (amdgpu_dm_do_flip [amdgpu]) from
> [<7f33309c>] (amdgpu_dm_atomic_commit_tail+0xbfc/0xe58 [amdgpu])
> ...

Does the problem also occur if you disable DC with amdgpu.dc=0 on the
kernel command line?

Does it also happen with a kernel built from the amd-staging-drm-next
branch instead of drm-next-4.16?


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4.15 1/4] drm/amd/display: Adding DCN1 registers

2018-01-03 Thread Carlo Caione
From: Mikita Lipski 

Registers added to definition list that are required
for multi display synchronization

Signed-off-by: Mikita Lipski 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
Signed-off-by: Alex Deucher 
---
 .../amd/display/dc/dcn10/dcn10_timing_generator.h  | 33 --
 1 file changed, 30 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
index 7d4818d7aa31..395820606013 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
@@ -72,7 +72,10 @@
SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
SRI(OPPBUF_CONTROL, OPPBUF, inst),\
SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, inst),\
-   SRI(CONTROL, VTG, inst)
+   SRI(CONTROL, VTG, inst),\
+   SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
+   SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\
+   SRI(OTG_GSL_CONTROL, OTG, inst)
 
 #define TG_COMMON_REG_LIST_DCN1_0(inst) \
TG_COMMON_REG_LIST_DCN(inst),\
@@ -82,6 +85,9 @@
 
 
 struct dcn_tg_registers {
+   uint32_t OTG_VERT_SYNC_CONTROL;
+   uint32_t OTG_MASTER_UPDATE_MODE;
+   uint32_t OTG_GSL_CONTROL;
uint32_t OTG_VSTARTUP_PARAM;
uint32_t OTG_VUPDATE_PARAM;
uint32_t OTG_VREADY_PARAM;
@@ -208,7 +214,18 @@ struct dcn_tg_registers {
SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, 
mask_sh),\
SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
-   SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh)
+   SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
+   SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, 
mask_sh),\
+   SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, 
mask_sh),\
+   SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\
+   SF(OTG0_OTG_MASTER_UPDATE_MODE, MASTER_UPDATE_INTERLACED_MODE, 
mask_sh),\
+   SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\
+   SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\
+   SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
+   SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
+   SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
+   SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh)
+
 
 #define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
@@ -317,7 +334,17 @@ struct dcn_tg_registers {
type OPPBUF_3D_VACT_SPACE1_SIZE;\
type VTG0_ENABLE;\
type VTG0_FP2;\
-   type VTG0_VCOUNT_INIT;
+   type VTG0_VCOUNT_INIT;\
+   type OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED;\
+   type OTG_FORCE_VSYNC_NEXT_LINE_CLEAR;\
+   type OTG_AUTO_FORCE_VSYNC_MODE;\
+   type MASTER_UPDATE_INTERLACED_MODE;\
+   type OTG_GSL0_EN;\
+   type OTG_GSL1_EN;\
+   type OTG_GSL2_EN;\
+   type OTG_GSL_MASTER_EN;\
+   type OTG_GSL_FORCE_DELAY;\
+   type OTG_GSL_CHECK_ALL_FIELDS;
 
 struct dcn_tg_shift {
TG_REG_FIELD_LIST(uint8_t)
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4.15 0/4] Backport DC commits to fix display corruption

2018-01-03 Thread Carlo Caione
From: Carlo Caione 

Hi,
on several laptops [0] we are seeing display corruption when using multiple /
external displays. We already opened an issue upstream [1].
The following 4 patches are taken from agd5f/amd-staging-drm-next and they seem
able to solve the issue.
Can those be included in 4.15?

Thank you,

[0] Acer Aspire E5-553G (AMD FX-9800P RADEON R7)
Acer Aspire E5-523G (AMD E2-9010 RADEON R2)
Acer Aspire A315-21 (AMD A4-9120 RADEON R3)
Acer Aspire A515-41G (AMD A10-9620 RADEON R5)

[1] https://bugs.freedesktop.org/show_bug.cgi?id=104319

Harry Wentland (1):
  drm/amd/display: Both timing_sync and multisync need stream_count > 1

Leo (Sunpeng) Li (1):
  drm/amd/display: Change frontend/backend programming sequence

Mikita Lipski (2):
  drm/amd/display: Adding DCN1 registers
  drm/amd/display: Multi display synchronization logic

 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |  54 -
 drivers/gpu/drm/amd/display/dc/core/dc.c   |  60 -
 drivers/gpu/drm/amd/display/dc/dc.h|   3 +
 drivers/gpu/drm/amd/display/dc/dc_hw_types.h   |  16 ++
 .../amd/display/dc/dce110/dce110_hw_sequencer.c|  46 +++-
 .../display/dc/dce110/dce110_timing_generator.c| 265 +
 .../display/dc/dce110/dce110_timing_generator.h|   6 +
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  |  29 ++-
 .../amd/display/dc/dcn10/dcn10_timing_generator.c  |  66 -
 .../amd/display/dc/dcn10/dcn10_timing_generator.h  |  33 ++-
 .../drm/amd/display/dc/inc/hw/timing_generator.h   |   6 +-
 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h  |   5 +
 12 files changed, 511 insertions(+), 78 deletions(-)

-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4.15 2/4] drm/amd/display: Multi display synchronization logic

2018-01-03 Thread Carlo Caione
From: Mikita Lipski 

This feature synchronizes multiple displays with various timings
to a display with the highest refresh rate
it is enabled if edid caps flag multi_display_sync is set to one

There are limitations on refresh rates allowed
that can be synchronized. That would
prevent underflow and other potential
corruption.

Multi display synchronization is using the
same functions as timing_sync in order to minimize
redundancy, and the decision to disable synchronization is
based on a trigger parameter set in DM

Feature is developed for DCN1 and DCE11

Signed-off-by: Mikita Lipski 
Reviewed-by: Mikita Lipski 
Acked-by: Harry Wentland 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |  54 -
 drivers/gpu/drm/amd/display/dc/core/dc.c   |  26 +-
 drivers/gpu/drm/amd/display/dc/dc.h|   3 +
 drivers/gpu/drm/amd/display/dc/dc_hw_types.h   |  16 ++
 .../amd/display/dc/dce110/dce110_hw_sequencer.c|  46 +++-
 .../display/dc/dce110/dce110_timing_generator.c| 265 +
 .../display/dc/dce110/dce110_timing_generator.h|   6 +
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  |  29 ++-
 .../amd/display/dc/dcn10/dcn10_timing_generator.c  |  66 -
 .../drm/amd/display/dc/inc/hw/timing_generator.h   |   6 +-
 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h  |   5 +
 11 files changed, 456 insertions(+), 66 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bb5fa895fb64..0fae5dfc7614 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2330,6 +2330,56 @@ static int create_fake_sink(struct amdgpu_dm_connector 
*aconnector)
return 0;
 }
 
+static void set_multisync_trigger_params(
+   struct dc_stream_state *stream)
+{
+   if (stream->triggered_crtc_reset.enabled) {
+   stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
+   stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
+   }
+}
+
+static void set_master_stream(struct dc_stream_state *stream_set[],
+ int stream_count)
+{
+   int j, highest_rfr = 0, master_stream = 0;
+
+   for (j = 0;  j < stream_count; j++) {
+   if (stream_set[j] && 
stream_set[j]->triggered_crtc_reset.enabled) {
+   int refresh_rate = 0;
+
+   refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
+   
(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
+   if (refresh_rate > highest_rfr) {
+   highest_rfr = refresh_rate;
+   master_stream = j;
+   }
+   }
+   }
+   for (j = 0;  j < stream_count; j++) {
+   if (stream_set[j] && j != master_stream)
+   stream_set[j]->triggered_crtc_reset.event_source = 
stream_set[master_stream];
+   }
+}
+
+static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
+{
+   int i = 0;
+
+   if (context->stream_count < 2)
+   return;
+   for (i = 0; i < context->stream_count ; i++) {
+   if (!context->streams[i])
+   continue;
+   /* TODO: add a function to read AMD VSDB bits and will set
+* crtc_sync_master.multi_sync_enabled flag
+* For now its set to false
+*/
+   set_multisync_trigger_params(context->streams[i]);
+   }
+   set_master_stream(context->streams, context->stream_count);
+}
+
 static struct dc_stream_state *
 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
   const struct drm_display_mode *drm_mode,
@@ -4160,8 +4210,10 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
}
}
 
-   if (dm_state->context)
+   if (dm_state->context) {
+   dm_enable_per_frame_crtc_master_sync(dm_state->context);
WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+   }
 
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7240db2e6f09..26c0dade7982 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -712,6 +712,28 @@ void dc_destroy(struct dc **dc)
*dc = NULL;
 }
 
+static void enable_timing_multisync(
+   struct dc *dc,
+   struct dc_state *ctx)
+{
+   int i = 0, multisync_count = 0;
+   int pipe_count = dc->res_pool->pipe_count;
+   struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
+
+   for (i = 0; i < pipe_cou

[PATCH 4.15 4/4] drm/amd/display: Change frontend/backend programming sequence

2018-01-03 Thread Carlo Caione
From: "Leo (Sunpeng) Li" 

This is a follow-up to the following change:

Yongqiang Sun: Program front end first when set mode.

Due to pipe-splitting features, how we handle stream enabling and
disabling needs to change.

In the case of pipe split disable, two planes need to be combined back
into the same stream. This needs to be done before any stream
programming happens.

The previous patch addresses this, but breaks cross-platform
compatibility. It's not guaranteed that a dc commit will be called
separately to program planes and streams.

Therefore, we handle the combined commit case by doing plane programming
both before and after stream programming, to handle pipe split disable
and plane enable respectively.

Signed-off-by: Leo (Sunpeng) Li 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 41 ++--
 1 file changed, 28 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5b3ca80d9401..8eeda0fb5c41 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -871,6 +871,33 @@ static enum dc_status dc_commit_state_no_check(struct dc 
*dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb))
dc->hwss.enable_accelerated_mode(dc);
 
+   /* Combine planes if required, in case of pipe split disable */
+   for (i = 0; i < dc->current_state->stream_count; i++) {
+   dc->hwss.apply_ctx_for_surface(
+   dc, dc->current_state->streams[i],
+   dc->current_state->stream_status[i].plane_count,
+   dc->current_state);
+   }
+
+   /* Program hardware */
+   dc->hwss.ready_shared_resources(dc, context);
+
+   for (i = 0; i < dc->res_pool->pipe_count; i++) {
+   pipe = &context->res_ctx.pipe_ctx[i];
+   dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+   }
+
+   result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+   if (result != DC_OK)
+   goto fail;
+
+   if (context->stream_count > 1) {
+   enable_timing_multisync(dc, context);
+   program_timing_sync(dc, context);
+   }
+
+   /* Program all planes within new context*/
for (i = 0; i < context->stream_count; i++) {
const struct dc_sink *sink = context->streams[i]->sink;
 
@@ -902,19 +929,7 @@ static enum dc_status dc_commit_state_no_check(struct dc 
*dc, struct dc_state *c
context->streams[i]->timing.pix_clk_khz);
}
 
-   dc->hwss.ready_shared_resources(dc, context);
-
-   for (i = 0; i < dc->res_pool->pipe_count; i++) {
-   pipe = &context->res_ctx.pipe_ctx[i];
-   dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
-   }
-   result = dc->hwss.apply_ctx_to_hw(dc, context);
-
-   if (context->stream_count > 1) {
-   enable_timing_multisync(dc, context);
-   program_timing_sync(dc, context);
-   }
-
+fail:
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
for (i = 0; i < context->stream_count; i++) {
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4.15 3/4] drm/amd/display: Both timing_sync and multisync need stream_count > 1

2018-01-03 Thread Carlo Caione
From: Harry Wentland 

Previous code threw a warning about misleading indentation

Signed-off-by: Harry Wentland 
Reviewed-by: Mikita Lipski 
Acked-by: Harry Wentland 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 26c0dade7982..5b3ca80d9401 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -910,9 +910,10 @@ static enum dc_status dc_commit_state_no_check(struct dc 
*dc, struct dc_state *c
}
result = dc->hwss.apply_ctx_to_hw(dc, context);
 
-   if (context->stream_count > 1)
+   if (context->stream_count > 1) {
enable_timing_multisync(dc, context);
program_timing_sync(dc, context);
+   }
 
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] Revert "drm/amd/amdgpu: set gtt size according to system memory size only"

2018-01-03 Thread Michel Dänzer
On 2017-12-29 08:36 PM, Felix Kuehling wrote:
> Is it possible that the test is broken? A test that allocates memory to
> exhaustion may well trigger the OOM killer. A test can do that by using
> malloc. Why not by using the graphics driver? The OOM killer does what
> it's supposed to do, and kills the broken application.
> 
> As I understand it, this change adds artificial limitations to
> work around a bug in a user mode test.

I'm afraid it's not that simple. While triggering the OOM killer might
be acceptable or even expected, hard hangs aren't.


-- 
Earthling Michel Dänzer   |   http://www.amd.com
Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4.15 0/4] Backport DC commits to fix display corruption

2018-01-03 Thread Harry Wentland
On 2018-01-03 12:11 PM, Carlo Caione wrote:
> From: Carlo Caione 
> 
> Hi,
> on several laptops [0] we are seeing display corruption when using multiple /
> external displays. We already opened an issue upstream [1].
> The following 4 patches are taken from agd5f/amd-staging-drm-next and they 
> seem
> able to solve the issue.
> Can those be included in 4.15?
> 
> Thank you,
> 
> [0] Acer Aspire E5-553G (AMD FX-9800P RADEON R7)
> Acer Aspire E5-523G (AMD E2-9010 RADEON R2)
> Acer Aspire A315-21 (AMD A4-9120 RADEON R3)
> Acer Aspire A515-41G (AMD A10-9620 RADEON R5)
> 
> [1] https://bugs.freedesktop.org/show_bug.cgi?id=104319
> 
> Harry Wentland (1):
>   drm/amd/display: Both timing_sync and multisync need stream_count > 1
> 
> Leo (Sunpeng) Li (1):
>   drm/amd/display: Change frontend/backend programming sequence
> 
> Mikita Lipski (2):
>   drm/amd/display: Adding DCN1 registers
>   drm/amd/display: Multi display synchronization logic
> 

I'm surprised Mikita and my patches are required here. Are you sure they make a 
difference for display corruption or did you simply add them to make pull Leo's 
patch more cleanly?

They're not bad changes. The "Multi display synchronization logic" patch is 
just very big and shouldn't have an impact except in very unique situations.

Harry

>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |  54 -
>  drivers/gpu/drm/amd/display/dc/core/dc.c   |  60 -
>  drivers/gpu/drm/amd/display/dc/dc.h|   3 +
>  drivers/gpu/drm/amd/display/dc/dc_hw_types.h   |  16 ++
>  .../amd/display/dc/dce110/dce110_hw_sequencer.c|  46 +++-
>  .../display/dc/dce110/dce110_timing_generator.c| 265 
> +
>  .../display/dc/dce110/dce110_timing_generator.h|   6 +
>  .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  |  29 ++-
>  .../amd/display/dc/dcn10/dcn10_timing_generator.c  |  66 -
>  .../amd/display/dc/dcn10/dcn10_timing_generator.h  |  33 ++-
>  .../drm/amd/display/dc/inc/hw/timing_generator.h   |   6 +-
>  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h  |   5 +
>  12 files changed, 511 insertions(+), 78 deletions(-)
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4.15 0/4] Backport DC commits to fix display corruption

2018-01-03 Thread Carlo Caione
On Wed, Jan 3, 2018 at 5:32 PM, Harry Wentland  wrote:
> On 2018-01-03 12:11 PM, Carlo Caione wrote:
>> From: Carlo Caione 
>>
>> Hi,
>> on several laptops [0] we are seeing display corruption when using multiple /
>> external displays. We already opened an issue upstream [1].
>> The following 4 patches are taken from agd5f/amd-staging-drm-next and they 
>> seem
>> able to solve the issue.
>> Can those be included in 4.15?
>>
>> Thank you,
>>
>> [0] Acer Aspire E5-553G (AMD FX-9800P RADEON R7)
>> Acer Aspire E5-523G (AMD E2-9010 RADEON R2)
>> Acer Aspire A315-21 (AMD A4-9120 RADEON R3)
>> Acer Aspire A515-41G (AMD A10-9620 RADEON R5)
>>
>> [1] https://bugs.freedesktop.org/show_bug.cgi?id=104319
>>
>> Harry Wentland (1):
>>   drm/amd/display: Both timing_sync and multisync need stream_count > 1
>>
>> Leo (Sunpeng) Li (1):
>>   drm/amd/display: Change frontend/backend programming sequence
>>
>> Mikita Lipski (2):
>>   drm/amd/display: Adding DCN1 registers
>>   drm/amd/display: Multi display synchronization logic
>>
>
> I'm surprised Mikita and my patches are required here. Are you sure they make 
> a difference for display corruption or did you simply add them to make pull 
> Leo's patch more cleanly?

Yes, the only strictly required patch is the patch by Leo. The others
are just pulled in to make that apply cleanly.

Thanks,

-- 
Carlo Caione  |  +44.7384.69.16.04  |  Endless
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: Deadlocks with multiple applications on AMD RX 460 and RX 550 - Update 2

2018-01-03 Thread Luís Mendes
Hi Michel, Christian,

Christian, I have followed your suggestion and I have just submitted a
bug to fdo at https://bugs.freedesktop.org/show_bug.cgi?id=104481 -
GPU lockup Polaris 11 - AMD RX 460 and RX 550 on amd64 and on ARMv7
platforms while playing video.

Michel, amdgpu.dc=0 seems to make no difference. I will try
amd-staging-drm-next and report back.

Regards,
Luís

On Wed, Jan 3, 2018 at 5:09 PM, Michel Dänzer  wrote:
> On 2018-01-03 12:02 PM, Luís Mendes wrote:
>>
>> What I believe it seems to be the case is that the GPU lock up only
>> happens when doing a page flip, since the kernel locks with:
>> [  243.693200] kworker/u4:3D089  2 0x
>> [  243.693232] Workqueue: events_unbound commit_work [drm_kms_helper]
>> [  243.693251] [<80b8c6d4>] (__schedule) from [<80b8cdd0>] 
>> (schedule+0x4c/0xac)
>> [  243.693259] [<80b8cdd0>] (schedule) from [<80b91024>]
>> (schedule_timeout+0x228/0x444)
>> [  243.693270] [<80b91024>] (schedule_timeout) from [<80886738>]
>> (dma_fence_default_wait+0x2b4/0x2d8)
>> [  243.693276] [<80886738>] (dma_fence_default_wait) from [<80885d60>]
>> (dma_fence_wait_timeout+0x40/0x150)
>> [  243.693284] [<80885d60>] (dma_fence_wait_timeout) from [<80887b1c>]
>> (reservation_object_wait_timeout_rcu+0xfc/0x34c)
>> [  243.693509] [<80887b1c>] (reservation_object_wait_timeout_rcu) from
>> [<7f331988>] (amdgpu_dm_do_flip+0xec/0x36c [amdgpu])
>> [  243.693789] [<7f331988>] (amdgpu_dm_do_flip [amdgpu]) from
>> [<7f33309c>] (amdgpu_dm_atomic_commit_tail+0xbfc/0xe58 [amdgpu])
>> ...
>
> Does the problem also occur if you disable DC with amdgpu.dc=0 on the
> kernel command line?
>
> Does it also happen with a kernel built from the amd-staging-drm-next
> branch instead of drm-next-4.16?
>
>
> --
> Earthling Michel Dänzer   |   http://www.amd.com
> Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[pull] amdgpu and ttm drm-fixes-4.15

2018-01-03 Thread Alex Deucher
Hi Dave,

Just two small fixes for 4.15:
- backport of a DC change which fixes a greenish tint on some RV hw
- properly handle kzalloc fail in ttm

The following changes since commit 03bfd4e19b935adb8be4f7342f13395fb7f11096:

  Merge tag 'drm-intel-fixes-2017-12-22-1' of 
git://anongit.freedesktop.org/drm/drm-intel into drm-fixes (2017-12-28 05:20:07 
+1000)

are available in the git repository at:

  git://people.freedesktop.org/~agd5f/linux drm-fixes-4.15

for you to fetch changes up to 19d859a7205bc59ffc38303eb25ae394f61d21dc:

  drm/ttm: check the return value of kzalloc (2018-01-02 14:54:05 -0500)


Xiongwei Song (1):
  drm/ttm: check the return value of kzalloc

Yue Hin Lau (1):
  drm/amd/display: call set csc_default if enable adjustment is false

 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h  | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c   | 6 ++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 ++
 drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h   | 2 +-
 drivers/gpu/drm/ttm/ttm_page_alloc.c  | 2 ++
 5 files changed, 8 insertions(+), 6 deletions(-)
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/powerplay: use ffs/fls instead of implementing our own

2018-01-03 Thread Alex Deucher
On Tue, Jan 2, 2018 at 10:24 PM, Evan Quan  wrote:
> Change-Id: I2683296f7b08cc637ed54b0b4b7db03b8818e658
> Signed-off-by: Evan Quan 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 27 
> --
>  1 file changed, 4 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> index ed16468..54728b6 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> @@ -4488,7 +4488,6 @@ static int vega10_force_clock_level(struct pp_hwmgr 
> *hwmgr,
> enum pp_clock_type type, uint32_t mask)
>  {
> struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
> -   int i;
>
> if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> AMD_DPM_FORCED_LEVEL_LOW |
> @@ -4497,17 +4496,8 @@ static int vega10_force_clock_level(struct pp_hwmgr 
> *hwmgr,
>
> switch (type) {
> case PP_SCLK:
> -   for (i = 0; i < 32; i++) {
> -   if (mask & (1 << i))
> -   break;
> -   }
> -   data->smc_state_table.gfx_boot_level = i;
> -
> -   for (i = 31; i >= 0; i--) {
> -   if (mask & (1 << i))
> -   break;
> -   }
> -   data->smc_state_table.gfx_max_level = i;
> +   data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) 
> : 0;
> +   data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) 
> : 0;
>
> PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
> "Failed to upload boot level to lowest!",
> @@ -4519,17 +4509,8 @@ static int vega10_force_clock_level(struct pp_hwmgr 
> *hwmgr,
> break;
>
> case PP_MCLK:
> -   for (i = 0; i < 32; i++) {
> -   if (mask & (1 << i))
> -   break;
> -   }
> -   data->smc_state_table.mem_boot_level = i;
> -
> -   for (i = 31; i >= 0; i--) {
> -   if (mask & (1 << i))
> -   break;
> -   }
> -   data->smc_state_table.mem_max_level = i;
> +   data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) 
> : 0;
> +   data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) 
> : 0;
>
> PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
> "Failed to upload boot level to lowest!",
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/7] drm/amd/powerplay: correct PP_TemperatureRange member type since negative values are part of the valid range

2018-01-03 Thread Alex Deucher
On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
> Change-Id: I68bb53691fb2edca66c09391ab5e60357496283c
> Signed-off-by: Evan Quan 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c   | 4 ++--
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 4 ++--
>  drivers/gpu/drm/amd/powerplay/inc/power_state.h  | 4 ++--
>  3 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
> index d7aa643..f6573ed 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
> @@ -310,9 +310,9 @@ int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr)
>  static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
> uint32_t low_temp, uint32_t high_temp)
>  {
> -   uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
> +   int low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
> PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> -   uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
> +   int high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
> PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
>
> if (low < low_temp)
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
> index dc3761b..7491163 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
> @@ -386,9 +386,9 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
>  static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
> struct PP_TemperatureRange *range)
>  {
> -   uint32_t low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
> +   int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
> PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> -   uint32_t high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
> +   int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
> PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> uint32_t val, reg;
>
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h 
> b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
> index 827860f..a99b5cbb 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
> @@ -122,8 +122,8 @@ struct PP_StateSoftwareAlgorithmBlock {
>   * Type to hold a temperature range.
>   */
>  struct PP_TemperatureRange {
> -   uint32_t min;
> -   uint32_t max;
> +   int min;
> +   int max;
>  };
>
>  struct PP_StateValidationBlock {
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/7] drm/amd/powerplay: new cgs interface setting dpm thermal range

2018-01-03 Thread Alex Deucher
On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
> Change-Id: Ib4156edff8e3daaead720956142d8540df3254d8
> Signed-off-by: Evan Quan 

Please include a commit message. Something like:
This will be used by powerplay to update the dpm temp range structure used to
interface with hwmon.
With that added:
Reviewed-by: Alex Deucher 


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c  | 13 +
>  drivers/gpu/drm/amd/include/cgs_common.h |  7 +++
>  2 files changed, 20 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> index 4466f35..bccb0f7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> @@ -1187,6 +1187,18 @@ static int amdgpu_cgs_call_acpi_method(struct 
> cgs_device *cgs_device,
> return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
>  }
>
> +static int amdgpu_cgs_set_temperature_range(struct cgs_device *cgs_device,
> +   int min_temperature,
> +   int max_temperature)
> +{
> +   CGS_FUNC_ADEV;
> +
> +   adev->pm.dpm.thermal.min_temp = min_temperature;
> +   adev->pm.dpm.thermal.max_temp = max_temperature;
> +
> +   return 0;
> +}
> +
>  static const struct cgs_ops amdgpu_cgs_ops = {
> .alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
> .free_gpu_mem = amdgpu_cgs_free_gpu_mem,
> @@ -1214,6 +1226,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
> .enter_safe_mode = amdgpu_cgs_enter_safe_mode,
> .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
> .register_pp_handle = amdgpu_cgs_register_pp_handle,
> +   .set_temperature_range = amdgpu_cgs_set_temperature_range,
>  };
>
>  static const struct cgs_os_ops amdgpu_cgs_os_ops = {
> diff --git a/drivers/gpu/drm/amd/include/cgs_common.h 
> b/drivers/gpu/drm/amd/include/cgs_common.h
> index 675988d..f5c7397 100644
> --- a/drivers/gpu/drm/amd/include/cgs_common.h
> +++ b/drivers/gpu/drm/amd/include/cgs_common.h
> @@ -427,6 +427,9 @@ struct amd_pp_init;
>  typedef void* (*cgs_register_pp_handle)(struct cgs_device *cgs_device,
> int (*call_back_func)(struct amd_pp_init *, void **));
>
> +typedef int (*cgs_set_temperature_range)(struct cgs_device *cgs_device,
> +   int min_temperature,
> +   int max_temperature);
>  struct cgs_ops {
> /* memory management calls (similar to KFD interface) */
> cgs_alloc_gpu_mem_t alloc_gpu_mem;
> @@ -464,6 +467,7 @@ struct cgs_ops {
> cgs_enter_safe_mode enter_safe_mode;
> cgs_lock_grbm_idx lock_grbm_idx;
> cgs_register_pp_handle register_pp_handle;
> +   cgs_set_temperature_range set_temperature_range;
>  };
>
>  struct cgs_os_ops; /* To be define in OS-specific CGS header */
> @@ -545,4 +549,7 @@ struct cgs_device
>  #define cgs_register_pp_handle(cgs_device, call_back_func) \
> CGS_CALL(register_pp_handle, cgs_device, call_back_func)
>
> +#define cgs_set_temperature_range(dev, min_temp, max_temp) \
> +   CGS_CALL(set_temperature_range, dev, min_temp, max_temp)
> +
>  #endif /* _CGS_COMMON_H */
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 3/7] drm/amd/powerplay: new hw manager interface for retrieving device specific thermal range

2018-01-03 Thread Alex Deucher
On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
> Change-Id: Icae6e15c7494d12bf8a194b9f1a352e0dcdef4ed
> Signed-off-by: Evan Quan 

Please include a commit message.  Something like:
Add a new callback for asic specific backends to specify the temperature ranges.
With that added:
Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 ++
>  1 file changed, 2 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h 
> b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
> index bd2889f..384a4cb 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
> @@ -339,6 +339,8 @@ struct pp_hwmgr_func {
> uint32_t mc_addr_low,
> uint32_t mc_addr_hi,
> uint32_t size);
> +   int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
> +   struct PP_TemperatureRange *range);
>  };
>
>  struct pp_table_func {
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4/7] drm/amd/powerplay: export thermal range through temp sysfs

2018-01-03 Thread Alex Deucher
On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
> Change-Id: I903ebb4d75ec0628e9720b6fee915c907e594613
> Signed-off-by: Evan Quan 

Please include a commit message.  Something like:
Populate the hwmon temp range as part of thermal controller setup.
With that added:
Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 11 +--
>  1 file changed, 9 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
> index 297ec0d..84e1cb0 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
> @@ -227,14 +227,21 @@ int phm_register_thermal_interrupt(struct pp_hwmgr 
> *hwmgr, const void *info)
>  */
>  int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
>  {
> +   int ret = 0;
> struct PP_TemperatureRange range = {{TEMP_RANGE_MIN, TEMP_RANGE_MAX}};
>
> +   if (hwmgr->hwmgr_func->get_thermal_temperature_range)
> +   hwmgr->hwmgr_func->get_thermal_temperature_range(
> +   hwmgr, &range);
> +
> if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
> PHM_PlatformCaps_ThermalController)
> && hwmgr->hwmgr_func->start_thermal_controller != 
> NULL)
> -   return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, 
> &range);
> +   ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, 
> &range);
>
> -   return 0;
> +   cgs_set_temperature_range(hwmgr->device, range.min, range.max);
> +
> +   return ret;
>  }
>
>
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 5/7] drm/amd/powerplay: export vega10 specific thermal ranges

2018-01-03 Thread Alex Deucher
On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
> Change-Id: If240a45dd2538e93185802b1fce54fd83de89de0
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 21 +
>  1 file changed, 21 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> index 2d55dab..ed16468 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> @@ -4988,6 +4988,26 @@ static int vega10_notify_cac_buffer_info(struct 
> pp_hwmgr *hwmgr,
> return 0;
>  }
>
> +static struct PP_TemperatureRange SMU7ThermalWithDelayPolicy[] =

Copy paste typo?  s/SMU7/vega10/?

Alex

> +{
> +   {-273150,  99000},
> +   { 12, 12},
> +};
> +
> +static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
> +   struct PP_TemperatureRange *thermal_data)
> +{
> +   struct phm_ppt_v2_information *table_info =
> +   (struct phm_ppt_v2_information *)hwmgr->pptable;
> +
> +   memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct 
> PP_TemperatureRange));
> +
> +   thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> +
> +   return 0;
> +}
> +
>  static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
> const void *info)
>  {
> @@ -5074,6 +5094,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
> .set_mclk_od = vega10_set_mclk_od,
> .avfs_control = vega10_avfs_enable,
> .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
> +   .get_thermal_temperature_range = vega10_get_thermal_temperature_range,
> .register_internal_thermal_interrupt = 
> vega10_register_thermal_interrupt,
> .start_thermal_controller = vega10_start_thermal_controller,
>  };
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 6/7] drm/amd/powerplay: export the thermal ranges of VI asics

2018-01-03 Thread Alex Deucher
On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
> Change-Id: I569179443c73c793153d5c499dd2f203f89e3ca2
> Signed-off-by: Evan Quan 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 26 
> 
>  1 file changed, 26 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> index 8d7fd06..dd304d6 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> @@ -4655,6 +4655,31 @@ static int smu7_notify_cac_buffer_info(struct pp_hwmgr 
> *hwmgr,
> return 0;
>  }
>
> +static struct PP_TemperatureRange SMU7ThermalPolicy[] =
> +{
> +   {-273150,  99000},
> +   { 12, 12},
> +};
> +
> +static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
> +   struct PP_TemperatureRange *thermal_data)
> +{
> +   struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
> +   struct phm_ppt_v1_information *table_info =
> +   (struct phm_ppt_v1_information *)hwmgr->pptable;
> +
> +   memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct 
> PP_TemperatureRange));
> +
> +   if (hwmgr->pp_table_version == PP_TABLE_V1)
> +   thermal_data->max = 
> table_info->cac_dtp_table->usSoftwareShutdownTemp *
> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> +   else if (hwmgr->pp_table_version == PP_TABLE_V0)
> +   thermal_data->max = 
> data->thermal_temp_setting.temperature_shutdown *
> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> +
> +   return 0;
> +}
> +
>  static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
> .backend_init = &smu7_hwmgr_backend_init,
> .backend_fini = &smu7_hwmgr_backend_fini,
> @@ -4707,6 +4732,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
> .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
> .start_thermal_controller = smu7_start_thermal_controller,
> .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
> +   .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
>  };
>
>  uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 7/7] drm/amd/powerplay: export the thermal ranges of Carrizo

2018-01-03 Thread Alex Deucher
On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
> Change-Id: I861e3e6d4ec553171cbf369eca4ac9d834478290
> Signed-off-by: Evan Quan 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 20 
>  1 file changed, 20 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> index b314d09..7e87f09 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> @@ -1858,6 +1858,25 @@ static int cz_notify_cac_buffer_info(struct pp_hwmgr 
> *hwmgr,
> return 0;
>  }
>
> +static struct PP_TemperatureRange SMU7ThermalPolicy[] =
> +{
> +   {-273150,  99000},
> +   { 12, 12},
> +};
> +
> +static int cz_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
> +   struct PP_TemperatureRange *thermal_data)
> +{
> +   struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
> +
> +   memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct 
> PP_TemperatureRange));
> +
> +   thermal_data->max = (cz_hwmgr->thermal_auto_throttling_treshold +
> +   cz_hwmgr->sys_info.htc_hyst_lmt) *
> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> +
> +   return 0;
> +}
>
>  static const struct pp_hwmgr_func cz_hwmgr_funcs = {
> .backend_init = cz_hwmgr_backend_init,
> @@ -1890,6 +1909,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
> .power_state_set = cz_set_power_state_tasks,
> .dynamic_state_management_disable = cz_disable_dpm_tasks,
> .notify_cac_buffer_info = cz_notify_cac_buffer_info,
> +   .get_thermal_temperature_range = cz_get_thermal_temperature_range,
>  };
>
>  int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 7/7] drm/amd/powerplay: export the thermal ranges of Carrizo

2018-01-03 Thread Alex Deucher
On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
> Change-Id: I861e3e6d4ec553171cbf369eca4ac9d834478290
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 20 
>  1 file changed, 20 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> index b314d09..7e87f09 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> @@ -1858,6 +1858,25 @@ static int cz_notify_cac_buffer_info(struct pp_hwmgr 
> *hwmgr,
> return 0;
>  }
>
> +static struct PP_TemperatureRange SMU7ThermalPolicy[] =

copy paste typo again.  With that fixed:
Reviewed-by: Alex Deucher 



> +{
> +   {-273150,  99000},
> +   { 12, 12},
> +};
> +
> +static int cz_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
> +   struct PP_TemperatureRange *thermal_data)
> +{
> +   struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
> +
> +   memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct 
> PP_TemperatureRange));
> +
> +   thermal_data->max = (cz_hwmgr->thermal_auto_throttling_treshold +
> +   cz_hwmgr->sys_info.htc_hyst_lmt) *
> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> +
> +   return 0;
> +}
>
>  static const struct pp_hwmgr_func cz_hwmgr_funcs = {
> .backend_init = cz_hwmgr_backend_init,
> @@ -1890,6 +1909,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
> .power_state_set = cz_set_power_state_tasks,
> .dynamic_state_management_disable = cz_disable_dpm_tasks,
> .notify_cac_buffer_info = cz_notify_cac_buffer_info,
> +   .get_thermal_temperature_range = cz_get_thermal_temperature_range,
>  };
>
>  int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 5/7] drm/amd/powerplay: export vega10 specific thermal ranges

2018-01-03 Thread Alex Deucher
On Wed, Jan 3, 2018 at 3:16 PM, Alex Deucher  wrote:
> On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
>> Change-Id: If240a45dd2538e93185802b1fce54fd83de89de0
>> Signed-off-by: Evan Quan 
>> ---
>>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 21 
>> +
>>  1 file changed, 21 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
>> b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> index 2d55dab..ed16468 100644
>> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> @@ -4988,6 +4988,26 @@ static int vega10_notify_cac_buffer_info(struct 
>> pp_hwmgr *hwmgr,
>> return 0;
>>  }
>>
>> +static struct PP_TemperatureRange SMU7ThermalWithDelayPolicy[] =
>
> Copy paste typo?  s/SMU7/vega10/?

With that fixed:
Reviewed-by: Alex Deucher 

>
> Alex
>
>> +{
>> +   {-273150,  99000},
>> +   { 12, 12},
>> +};
>> +
>> +static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
>> +   struct PP_TemperatureRange *thermal_data)
>> +{
>> +   struct phm_ppt_v2_information *table_info =
>> +   (struct phm_ppt_v2_information *)hwmgr->pptable;
>> +
>> +   memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct 
>> PP_TemperatureRange));
>> +
>> +   thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
>> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
>> +
>> +   return 0;
>> +}
>> +
>>  static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
>> const void *info)
>>  {
>> @@ -5074,6 +5094,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = 
>> {
>> .set_mclk_od = vega10_set_mclk_od,
>> .avfs_control = vega10_avfs_enable,
>> .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
>> +   .get_thermal_temperature_range = 
>> vega10_get_thermal_temperature_range,
>> .register_internal_thermal_interrupt = 
>> vega10_register_thermal_interrupt,
>> .start_thermal_controller = vega10_start_thermal_controller,
>>  };
>> --
>> 2.7.4
>>
>> ___
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 05/12] drm/amd/display: disablePSR in UpdatePlanes in PassiveLevel

2018-01-03 Thread Harry Wentland
From: Charlene Liu 

Signed-off-by: Charlene Liu 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index f663adb33584..48e21ead3142 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -521,6 +521,9 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, 
bool enable, bool wait)
if (dmcu->dmcu_state != DMCU_RUNNING)
return;
 
+   dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+   if (psr_state == 0 && !enable)
+   return;
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
dmcu_wait_reg_ready_interval,
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 03/12] drm/amd/display: Move dpp reg access from hwss to dpp module.

2018-01-03 Thread Harry Wentland
From: Yongqiang Sun 

Signed-off-by: Yongqiang Sun 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h |  8 -
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c   | 21 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h   | 21 +
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  | 35 +-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h   |  2 --
 drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h|  5 
 6 files changed, 49 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h 
b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index c9a45d5222e6..b3cd7ca7b4ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -140,10 +140,6 @@
BL_REG_LIST()
 
 #define HWSEQ_DCN_REG_LIST()\
-   SRII(DPP_CONTROL, DPP_TOP, 0), \
-   SRII(DPP_CONTROL, DPP_TOP, 1), \
-   SRII(DPP_CONTROL, DPP_TOP, 2), \
-   SRII(DPP_CONTROL, DPP_TOP, 3), \
SRII(OPP_PIPE_CONTROL, OPP_PIPE, 0), \
SRII(OPP_PIPE_CONTROL, OPP_PIPE, 1), \
SRII(OPP_PIPE_CONTROL, OPP_PIPE, 2), \
@@ -252,7 +248,6 @@ struct dce_hwseq_registers {
uint32_t DCHUB_AGP_BOT;
uint32_t DCHUB_AGP_TOP;
 
-   uint32_t DPP_CONTROL[4];
uint32_t OPP_PIPE_CONTROL[4];
uint32_t REFCLK_CNTL;
uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;
@@ -423,7 +418,6 @@ struct dce_hwseq_registers {
 #define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, 
mask_sh), \
-   HWS_SF(DPP_TOP0_, DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
HWS_SF(OPP_PIPE0_, OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh),\
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 
mask_sh), \
HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, 
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
@@ -445,7 +439,6 @@ struct dce_hwseq_registers {
HWS_SF(, DCHUBBUB_SDPIF_AGP_BASE, SDPIF_AGP_BASE, mask_sh), \
HWS_SF(, DCHUBBUB_SDPIF_AGP_BOT, SDPIF_AGP_BOT, mask_sh), \
HWS_SF(, DCHUBBUB_SDPIF_AGP_TOP, SDPIF_AGP_TOP, mask_sh), \
-   HWS_SF(DPP_TOP0_, DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh), \
/* todo:  get these from GVM instead of reading registers ourselves */\
HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, 
PAGE_DIRECTORY_ENTRY_HI32, mask_sh),\
HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, 
PAGE_DIRECTORY_ENTRY_LO32, mask_sh),\
@@ -520,7 +513,6 @@ struct dce_hwseq_registers {
type HUBP_VTG_SEL; \
type HUBP_CLOCK_ENABLE; \
type DPP_CLOCK_ENABLE; \
-   type DPPCLK_RATE_CONTROL; \
type SDPIF_FB_TOP;\
type SDPIF_FB_BASE;\
type SDPIF_FB_OFFSET;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index f2a08b156cf0..080c25383a4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -424,6 +424,26 @@ void dpp1_set_cursor_position(
 
 }
 
+void dpp1_dppclk_control(
+   struct dpp *dpp_base,
+   bool dppclk_div,
+   bool enable)
+{
+   struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+   if (enable) {
+   if (dpp->tf_mask->DPPCLK_RATE_CONTROL) {
+   REG_UPDATE_2(DPP_CONTROL,
+   DPPCLK_RATE_CONTROL, dppclk_div,
+   DPP_CLOCK_ENABLE, 1);
+   } else {
+   ASSERT(dppclk_div == false);
+   REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 1);
+   }
+   } else
+   REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
+}
+
 static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
@@ -445,6 +465,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_full_bypass= dpp1_full_bypass,
.set_cursor_attributes = dpp1_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
+   .dpp_dppclk_control = dpp1_dppclk_control,
 };
 
 static struct dpp_caps dcn10_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index f56ee4d08d89..ad6073a077ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -112,7 +112,8 @@
SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
-   SRI(CURSOR0_COLOR1, CNVC_CUR, id)
+   SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
+   SRI(DPP_CONTROL

[PATCH 04/12] drm/amd/display: Fix null-derefs on non-dcn builds

2018-01-03 Thread Harry Wentland
From: Roman Li 

Fixing regression introduced by
'Use real BE and FE index to program regs.'

Signed-off-by: Roman Li 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c   | 3 ++-
 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ba88284d305a..a215c5b74cbd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1407,7 +1407,8 @@ static int acquire_first_free_pipe(
pipe_ctx->plane_res.xfm = pool->transforms[i];
pipe_ctx->plane_res.dpp = pool->dpps[i];
pipe_ctx->stream_res.opp = pool->opps[i];
-   pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst;
+   if (pool->dpps[i])
+   pipe_ctx->plane_res.mpcc_inst = 
pool->dpps[i]->inst;
pipe_ctx->pipe_idx = i;
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 58823bafdbe6..d036d1da03e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2819,7 +2819,8 @@ static void dce110_apply_ctx_for_surface(
 
 static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
-   int fe_idx = pipe_ctx->plane_res.mi->inst;
+   int fe_idx = pipe_ctx->plane_res.mi ?
+   pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx;
 
/* Do not power down fe when stream is active on dce*/
if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 02/12] drm/amd/display: Check opplist in pipe ctx not in res pool.

2018-01-03 Thread Harry Wentland
From: Yongqiang Sun 

Signed-off-by: Yongqiang Sun 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 5f9094c803cc..4fedd7b68062 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -600,13 +600,13 @@ static void plane_atomic_disconnect(struct dc *dc, struct 
pipe_ctx *pipe_ctx)
struct hubp *hubp = pipe_ctx->plane_res.hubp;
int dpp_id = pipe_ctx->plane_res.dpp->inst;
struct mpc *mpc = dc->res_pool->mpc;
-   int opp_id;
+   int pipe_idx;
struct mpc_tree *mpc_tree_params;
struct mpcc *mpcc_to_remove = NULL;
 
/* look at tree rather than mi here to know if we already reset */
-   for (opp_id = 0; opp_id < dc->res_pool->pipe_count; opp_id++) {
-   struct output_pixel_processor *opp = dc->res_pool->opps[opp_id];
+   for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+   struct output_pixel_processor *opp = 
dc->res_pool->opps[pipe_idx];
 
mpc_tree_params = &(opp->mpc_tree_params);
mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, 
dpp_id);
@@ -615,11 +615,11 @@ static void plane_atomic_disconnect(struct dc *dc, struct 
pipe_ctx *pipe_ctx)
}
 
/*Already reset*/
-   if (opp_id == dc->res_pool->pipe_count)
+   if (pipe_idx == dc->res_pool->pipe_count)
return;
 
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
-   
dc->res_pool->opps[opp_id]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst]
 = true;
+   
dc->res_pool->opps[pipe_idx]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst]
 = true;
 
dc->optimized_required = true;
 
@@ -665,7 +665,7 @@ static void plane_atomic_disable(struct dc *dc, struct 
pipe_ctx *pipe_ctx)
REG_UPDATE(DPP_CONTROL[dpp_id],
DPP_CLOCK_ENABLE, 0);
 
-   if (opp_id != 0xf && 
dc->res_pool->opps[opp_id]->mpc_tree_params.opp_list == NULL)
+   if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list 
== NULL)
REG_UPDATE(OPP_PIPE_CONTROL[opp_id],
OPP_PIPE_CLOCK_EN, 0);
 
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 07/12] drm/amd/display: Refactor remove mpcc processing.

2018-01-03 Thread Harry Wentland
From: Yongqiang Sun 

No need to use a loop to find the opp; use the opp from stream_res instead.

Signed-off-by: Yongqiang Sun 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 19 +--
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index b039135b..8b6c2499453f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -580,26 +580,19 @@ static void plane_atomic_disconnect(struct dc *dc, struct 
pipe_ctx *pipe_ctx)
struct hubp *hubp = pipe_ctx->plane_res.hubp;
int dpp_id = pipe_ctx->plane_res.dpp->inst;
struct mpc *mpc = dc->res_pool->mpc;
-   int pipe_idx;
struct mpc_tree *mpc_tree_params;
struct mpcc *mpcc_to_remove = NULL;
+   struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
 
-   /* look at tree rather than mi here to know if we already reset */
-   for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
-   struct output_pixel_processor *opp = 
dc->res_pool->opps[pipe_idx];
-
-   mpc_tree_params = &(opp->mpc_tree_params);
-   mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, 
dpp_id);
-   if (mpcc_to_remove != NULL)
-   break;
-   }
+   mpc_tree_params = &(opp->mpc_tree_params);
+   mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
 
/*Already reset*/
-   if (pipe_idx == dc->res_pool->pipe_count)
+   if (mpcc_to_remove == NULL)
return;
 
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
-   
dc->res_pool->opps[pipe_idx]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst]
 = true;
+   opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
 
dc->optimized_required = true;
 
@@ -975,8 +968,6 @@ static void dcn10_pipe_control_lock(
struct pipe_ctx *pipe,
bool lock)
 {
-   struct hubp *hubp = NULL;
-   hubp = dc->res_pool->hubps[pipe->pipe_idx];
/* use TG master update lock to lock everything on the TG
 * therefore only top pipe need to lock
 */
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 01/12] drm/amd/display: Drop dm_connector_update_modes

2018-01-03 Thread Harry Wentland
It's unused since the drm_edid_to_eld cleanup

Signed-off-by: Harry Wentland 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 10 ++
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 93421dad21bd..1e8a21b67df7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -174,12 +174,6 @@ static const struct drm_connector_funcs 
dm_dp_mst_connector_funcs = {
.atomic_get_property = amdgpu_dm_connector_atomic_get_property
 };
 
-static int dm_connector_update_modes(struct drm_connector *connector,
-   struct edid *edid)
-{
-   return drm_add_edid_modes(connector, edid);
-}
-
 void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 {
struct amdgpu_dm_connector *aconnector = 
to_amdgpu_dm_connector(connector);
@@ -228,7 +222,7 @@ static int dm_dp_mst_get_modes(struct drm_connector 
*connector)
int ret = 0;
 
if (!aconnector)
-   return dm_connector_update_modes(connector, NULL);
+   return drm_add_edid_modes(connector, NULL);
 
if (!aconnector->edid) {
struct edid *edid;
@@ -264,7 +258,7 @@ static int dm_dp_mst_get_modes(struct drm_connector 
*connector)
&aconnector->base, edid);
}
 
-   ret = dm_connector_update_modes(connector, aconnector->edid);
+   ret = drm_add_edid_modes(connector, aconnector->edid);
 
return ret;
 }
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 08/12] drm/amd/display: clean up DCHUBBUB register definition in hwseq

2018-01-03 Thread Harry Wentland
From: Eric Bernstein 

Cleanup to remove unused register definition from hw sequencer
header file since implementation moved from hw sequencer to dchubub file.

Signed-off-by: Eric Bernstein 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 76 +-
 1 file changed, 1 insertion(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h 
b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index aea64946c409..3336428b1fed 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -141,25 +141,7 @@
 
 #define HWSEQ_DCN_REG_LIST()\
SR(REFCLK_CNTL), \
-   SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
-   SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
-   SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\
-   SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
-   SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
-   SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\
-   SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
-   SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
-   SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\
-   SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
-   SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\
-   SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\
-   SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
-   SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
-   SR(DCHUBBUB_ARB_SAT_LEVEL),\
-   SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
-   SR(DCHUBBUB_TEST_DEBUG_INDEX), \
-   SR(DCHUBBUB_TEST_DEBUG_DATA), \
SR(DIO_MEM_PWR_CTRL), \
SR(DCCG_GATE_DISABLE_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL2), \
@@ -179,22 +161,10 @@
MMHUB_SR(MC_VM_SYSTEM_APERTURE_LOW_ADDR),\
MMHUB_SR(MC_VM_SYSTEM_APERTURE_HIGH_ADDR)
 
-#define HWSEQ_SR_WATERMARK_REG_LIST()\
-   SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
-   SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\
-   SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B),\
-   SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B),\
-   SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C),\
-   SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C),\
-   SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D),\
-   SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D)
-
 #define HWSEQ_DCN1_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
-   HWSEQ_SR_WATERMARK_REG_LIST(), \
HWSEQ_PIXEL_RATE_REG_LIST(OTG), \
HWSEQ_PHYPLL_REG_LIST(OTG), \
-   SR(DCHUBBUB_SDPIF_FB_TOP),\
SR(DCHUBBUB_SDPIF_FB_BASE),\
SR(DCHUBBUB_SDPIF_FB_OFFSET),\
SR(DCHUBBUB_SDPIF_AGP_BASE),\
@@ -245,34 +215,8 @@ struct dce_hwseq_registers {
uint32_t DCHUB_AGP_TOP;
 
uint32_t REFCLK_CNTL;
-   uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;
-   uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A;
-   uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;
-   uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;
-   uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;
-   uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;
-   uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B;
-   uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;
-   uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;
-   uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;
-   uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;
-   uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C;
-   uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;
-   uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;
-   uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;
-   uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;
-   uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D;
-   uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;
-   uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D;
-   uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;
-   uint32_t DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL;
-   uint32_t DCHUBBUB_ARB_SAT_LEVEL;
-   uint32_t DCHUBBUB_ARB_DF_REQ_OUTSTAND;
+
uint32_t DCHUBBUB_GLOBAL_TIMER_CNTL;
-   uint32_t DCHUBBUB_ARB_DRAM_STATE_CNTL;
-   uint32_t DCHUBBUB_TEST_DEBUG_INDEX;
-   uint32_t DCHUBBUB_TEST_DEBUG_DATA;
-   uint32_t DCHUBBUB_SDPIF_FB_TOP;
uint32_t DCHUBBUB_SDPIF_FB_BASE;
uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
uint32_t DCHUBBUB_SDPIF_AGP_BASE;
@@ -414,20 +358,11 @@ struct dce_hwseq_registers {
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, 
mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 
mask_sh), \
-   HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, 
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
-   HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, 
DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABL

[PATCH 09/12] drm/amd/display: Implement interface for CRC on CRTC

2018-01-03 Thread Harry Wentland
From: "Leo (Sunpeng) Li" 

Add interfaces in DC for per CRTC CRC configuration and fetching.
Also implement amdgpu_dm functions to hook onto DRM.

Signed-off-by: Leo (Sunpeng) Li 
Reviewed-by: Tony Cheng 
Reviewed-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/Makefile |   4 +
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |   2 +
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h  |  12 +++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c  | 113 +
 drivers/gpu/drm/amd/display/dc/core/dc.c   |  85 
 drivers/gpu/drm/amd/display/dc/dc_stream.h |  11 ++
 .../drm/amd/display/dc/inc/hw/timing_generator.h   |  44 
 7 files changed, 271 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile 
b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 2b72009844f8..d7accc2071c4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),)
 AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
 endif
 
+ifneq ($(CONFIG_DEBUG_FS),)
+AMDGPUDM += amdgpu_dm_crc.o
+endif
+
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
 
 AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2517bb575e24..5a70682c30aa 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -319,6 +319,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
crtc_index = acrtc->crtc_id;
 
drm_handle_vblank(adev->ddev, crtc_index);
+   amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
 }
 
 static int dm_set_clockgating_state(void *handle,
@@ -2528,6 +2529,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = 
{
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = dm_crtc_duplicate_state,
.atomic_destroy_state = dm_crtc_destroy_state,
+   .set_crc_source = amdgpu_dm_crtc_set_crc_source,
 };
 
 static enum drm_connector_status
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 3c9154f2d058..996ab81140df 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -210,6 +210,8 @@ struct dm_plane_state {
 struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
+
+   bool crc_first_skipped;
 };
 
 #define to_dm_crtc_state(x)container_of(x, struct dm_crtc_state, base)
@@ -269,6 +271,16 @@ void amdgpu_dm_add_sink_to_freesync_module(struct 
drm_connector *connector,
 void
 amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector);
 
+/* amdgpu_dm_crc.c */
+#ifdef CONFIG_DEBUG_FS
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
+ size_t *values_cnt);
+void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
+#else
+#define amdgpu_dm_crtc_set_crc_source NULL
+void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) {}
+#endif
+
 extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
 
 #endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
new file mode 100644
index ..5768103803fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include 
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "dc.h"
+
+enum amdgpu_dm_pipe_crc_s

[PATCH 00/12] DC Patches Jan 3, 2018

2018-01-03 Thread Harry Wentland
 * Fix for null dereference non-DCN ASICs at boot
 * CRC readback support for DCE 10 to DCE 11.2
 * Bunch of DCN cleanup

Charlene Liu (1):
  drm/amd/display: disablePSR in UpdatePlanes in PassiveLevel

Eric Bernstein (2):
  drm/amd/display: clean up DCHUBBUB register definition in hwseq
  drm/amd/display: Update dcn10_init_hw for FPGA

Harry Wentland (1):
  drm/amd/display: Drop dm_connector_update_modes

Krunoslav Kovac (1):
  drm/amd/display: [RS4][RV] SDR Brightness Boost

Leo (Sunpeng) Li (2):
  drm/amd/display: Implement interface for CRC on CRTC
  drm/amd/display: Implement CRTC CRC for DCE110

Roman Li (1):
  drm/amd/display: Fix null-derefs on non-dcn builds

Yongqiang Sun (4):
  drm/amd/display: Check opplist in pipe ctx not in res pool.
  drm/amd/display: Move dpp reg access from hwss to dpp module.
  drm/amd/display: Move opp reg access from hwss to opp module.
  drm/amd/display: Refactor remove mpcc processing.

 drivers/gpu/drm/amd/display/amdgpu_dm/Makefile |   4 +
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |   2 +
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h  |  12 ++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c  | 113 +++
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c|  10 +-
 drivers/gpu/drm/amd/display/dc/core/dc.c   |  91 ++-
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c  |   3 +-
 drivers/gpu/drm/amd/display/dc/dc.h|   5 +
 drivers/gpu/drm/amd/display/dc/dc_stream.h |  11 ++
 drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c  |   3 +
 drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h |  90 +--
 .../amd/display/dc/dce110/dce110_hw_sequencer.c|   3 +-
 .../display/dc/dce110/dce110_timing_generator.c| 122 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c   |  21 
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h   |  21 +++-
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  | 102 +++--
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h   |   2 -
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c   |   9 ++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h   |  14 ++-
 drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h|   5 +
 drivers/gpu/drm/amd/display/dc/inc/hw/opp.h|   4 +
 .../drm/amd/display/dc/inc/hw/timing_generator.h   |  44 
 22 files changed, 516 insertions(+), 175 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c

-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 11/12] drm/amd/display: Update dcn10_init_hw for FPGA

2018-01-03 Thread Harry Wentland
From: Eric Bernstein 

Update dcn10_init_hw so that the relevant HW blocks for the
Maximus FPGA are also initialized (and not skipped).

Signed-off-by: Eric Bernstein 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  | 35 --
 1 file changed, 19 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 8b6c2499453f..7e99d788f0e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -692,26 +692,25 @@ static void dcn10_init_hw(struct dc *dc)
}
 
enable_power_gating_plane(dc->hwseq, true);
-   return;
-   }
-   /* end of FPGA. Below if real ASIC */
+   } else {
 
-   if (!dcb->funcs->is_accelerated_mode(dcb)) {
-   bios_golden_init(dc);
-   disable_vga(dc->hwseq);
-   }
+   if (!dcb->funcs->is_accelerated_mode(dcb)) {
+   bios_golden_init(dc);
+   disable_vga(dc->hwseq);
+   }
 
-   for (i = 0; i < dc->link_count; i++) {
-   /* Power up AND update implementation according to the
-* required signal (which may be different from the
-* default signal on connector).
-*/
-   struct dc_link *link = dc->links[i];
+   for (i = 0; i < dc->link_count; i++) {
+   /* Power up AND update implementation according to the
+* required signal (which may be different from the
+* default signal on connector).
+*/
+   struct dc_link *link = dc->links[i];
 
-   if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
-   dc->hwss.edp_power_control(link, true);
+   if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
+   dc->hwss.edp_power_control(link, true);
 
-   link->link_enc->funcs->hw_init(link->link_enc);
+   link->link_enc->funcs->hw_init(link->link_enc);
+   }
}
 
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -779,6 +778,10 @@ static void dcn10_init_hw(struct dc *dc)
tg->funcs->tg_init(tg);
}
 
+   /* end of FPGA. Below if real ASIC */
+   if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+   return;
+
for (i = 0; i < dc->res_pool->audio_count; i++) {
struct audio *audio = dc->res_pool->audios[i];
 
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 12/12] drm/amd/display: [RS4][RV] SDR Brightness Boost

2018-01-03 Thread Harry Wentland
From: Krunoslav Kovac 

We assume FP16 1.0 frame buffer value maps to 80 nits.
DC changes are to make this configurable.

Signed-off-by: Krunoslav Kovac 
Reviewed-by: Anthony Koo 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 6 +-
 drivers/gpu/drm/amd/display/dc/dc.h  | 5 +
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 350458d3730c..990f891a46f6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1057,6 +1057,9 @@ static enum surface_update_type 
get_plane_info_update_type(const struct dc_surfa
if (u->plane_info->input_tf != u->surface->input_tf)
update_flags->bits.input_tf_change = 1;
 
+   if (u->plane_info->sdr_white_level != u->surface->sdr_white_level)
+   update_flags->bits.output_tf_change = 1;
+
if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
update_flags->bits.horizontal_mirror_change = 1;
 
@@ -1101,7 +1104,8 @@ static enum surface_update_type 
get_plane_info_update_type(const struct dc_surfa
|| update_flags->bits.stereo_format_change
|| update_flags->bits.gamma_change
|| update_flags->bits.bpp_change
-   || update_flags->bits.bandwidth_change)
+   || update_flags->bits.bandwidth_change
+   || update_flags->bits.output_tf_change)
return UPDATE_TYPE_FULL;
 
return UPDATE_TYPE_MED;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index bc595bc15fe2..41d095318a5c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -367,6 +367,8 @@ struct dc_transfer_func {
struct dc_transfer_func_distributed_points tf_pts;
enum dc_transfer_func_type type;
enum dc_transfer_func_predefined tf;
+   /* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
+   uint32_t sdr_ref_white_level;
struct dc_context *ctx;
 };
 
@@ -397,6 +399,7 @@ union surface_update_flags {
uint32_t position_change:1;
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
+   uint32_t output_tf_change:1;
 
/* Full updates */
uint32_t new_plane:1;
@@ -428,6 +431,7 @@ struct dc_plane_state {
struct dc_bias_and_scale *bias_and_scale;
struct csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
+   uint32_t sdr_white_level;
 
// TODO: No longer used, remove
struct dc_hdr_static_metadata hdr_static_ctx;
@@ -464,6 +468,7 @@ struct dc_plane_info {
enum plane_stereo_format stereo_format;
enum dc_color_space color_space;
enum color_transfer_func input_tf;
+   unsigned int sdr_white_level;
bool horizontal_mirror;
bool visible;
bool per_pixel_alpha;
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 06/12] drm/amd/display: Move opp reg access from hwss to opp module.

2018-01-03 Thread Harry Wentland
From: Yongqiang Sun 

Signed-off-by: Yongqiang Sun 
Reviewed-by: Tony Cheng 
Acked-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h|  6 --
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 11 ++-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c  |  9 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h  | 14 ++
 drivers/gpu/drm/amd/display/dc/inc/hw/opp.h   |  4 
 5 files changed, 29 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h 
b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index b3cd7ca7b4ef..aea64946c409 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -140,10 +140,6 @@
BL_REG_LIST()
 
 #define HWSEQ_DCN_REG_LIST()\
-   SRII(OPP_PIPE_CONTROL, OPP_PIPE, 0), \
-   SRII(OPP_PIPE_CONTROL, OPP_PIPE, 1), \
-   SRII(OPP_PIPE_CONTROL, OPP_PIPE, 2), \
-   SRII(OPP_PIPE_CONTROL, OPP_PIPE, 3), \
SR(REFCLK_CNTL), \
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
@@ -248,7 +244,6 @@ struct dce_hwseq_registers {
uint32_t DCHUB_AGP_BOT;
uint32_t DCHUB_AGP_TOP;
 
-   uint32_t OPP_PIPE_CONTROL[4];
uint32_t REFCLK_CNTL;
uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;
uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A;
@@ -418,7 +413,6 @@ struct dce_hwseq_registers {
 #define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, 
mask_sh), \
-   HWS_SF(OPP_PIPE0_, OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh),\
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 
mask_sh), \
HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, 
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, 
DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 5431de7419b3..b039135b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -633,7 +633,6 @@ static void plane_atomic_power_down(struct dc *dc, struct 
pipe_ctx *pipe_ctx)
  */
 static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
-   struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
int opp_id = hubp->opp_id;
@@ -645,8 +644,9 @@ static void plane_atomic_disable(struct dc *dc, struct 
pipe_ctx *pipe_ctx)
dpp->funcs->dpp_dppclk_control(dpp, false, false);
 
if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list 
== NULL)
-   REG_UPDATE(OPP_PIPE_CONTROL[opp_id],
-   OPP_PIPE_CLOCK_EN, 0);
+   pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
+   pipe_ctx->stream_res.opp,
+   false);
 
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to 
optimize */
@@ -1311,8 +1311,9 @@ static void dcn10_enable_plane(

pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
 
/* make sure OPP_PIPE_CLOCK_EN = 1 */
-   REG_UPDATE(OPP_PIPE_CONTROL[pipe_ctx->stream_res.tg->inst],
-   OPP_PIPE_CLOCK_EN, 1);
+   pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
+   pipe_ctx->stream_res.opp,
+   true);
 
 /* TODO: enable/disable in dm as per update type.
if (plane_state) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index f6ba0eef4489..77a1a9d541a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -367,6 +367,14 @@ void opp1_program_oppbuf(
 
 }
 
+void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
+{
+   struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+   uint32_t regval = enable ? 1 : 0;
+
+   REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval);
+}
+
 /*/
 /* Constructor, Destructor   */
 /*/
@@ -382,6 +390,7 @@ static struct opp_funcs dcn10_opp_funcs = {
.opp_program_fmt = opp1_program_fmt,
.opp_program_bit_depth_reduction = 
opp1_program_bit_depth_reduction,
.opp_program_stereo = opp1_program_stereo,
+   .opp_pipe_clock_control = opp1_pipe_clock_control,

[PATCH 10/12] drm/amd/display: Implement CRTC CRC for DCE110

2018-01-03 Thread Harry Wentland
From: "Leo (Sunpeng) Li" 

Implement the timing generator hooks for configure_crc and get_crc.
Also implement is_tg_enabled, as configure_crc uses it.

Signed-off-by: Leo (Sunpeng) Li 
Reviewed-by: Tony Cheng 
Reviewed-by: Harry Wentland 
---
 .../display/dc/dce110/dce110_timing_generator.c| 122 +
 1 file changed, 122 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 25ca72139e5f..078d18c3eee5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -2077,6 +2077,125 @@ bool dce110_arm_vert_intr(struct timing_generator *tg, 
uint8_t width)
return true;
 }
 
+static bool dce110_is_tg_enabled(struct timing_generator *tg)
+{
+   uint32_t addr = 0;
+   uint32_t value = 0;
+   uint32_t field = 0;
+   struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+   addr = CRTC_REG(mmCRTC_CONTROL);
+   value = dm_read_reg(tg->ctx, addr);
+   field = get_reg_field_value(value, CRTC_CONTROL,
+   CRTC_CURRENT_MASTER_EN_STATE);
+   return field == 1;
+}
+
+static bool dce110_configure_crc(struct timing_generator *tg,
+const struct crc_params *params)
+{
+   uint32_t cntl_addr = 0;
+   uint32_t addr = 0;
+   uint32_t value;
+   struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+   /* Cannot configure crc on a CRTC that is disabled */
+   if (!dce110_is_tg_enabled(tg))
+   return false;
+
+   cntl_addr = CRTC_REG(mmCRTC_CRC_CNTL);
+
+   /* First, disable CRC before we configure it. */
+   dm_write_reg(tg->ctx, cntl_addr, 0);
+
+   if (!params->enable)
+   return true;
+
+   /* Program frame boundaries */
+   /* Window A x axis start and end. */
+   value = 0;
+   addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
+   set_reg_field_value(value, params->windowa_x_start,
+   CRTC_CRC0_WINDOWA_X_CONTROL,
+   CRTC_CRC0_WINDOWA_X_START);
+   set_reg_field_value(value, params->windowa_x_end,
+   CRTC_CRC0_WINDOWA_X_CONTROL,
+   CRTC_CRC0_WINDOWA_X_END);
+   dm_write_reg(tg->ctx, addr, value);
+
+   /* Window A y axis start and end. */
+   value = 0;
+   addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
+   set_reg_field_value(value, params->windowa_y_start,
+   CRTC_CRC0_WINDOWA_Y_CONTROL,
+   CRTC_CRC0_WINDOWA_Y_START);
+   set_reg_field_value(value, params->windowa_y_end,
+   CRTC_CRC0_WINDOWA_Y_CONTROL,
+   CRTC_CRC0_WINDOWA_Y_END);
+   dm_write_reg(tg->ctx, addr, value);
+
+   /* Window B x axis start and end. */
+   value = 0;
+   addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
+   set_reg_field_value(value, params->windowb_x_start,
+   CRTC_CRC0_WINDOWB_X_CONTROL,
+   CRTC_CRC0_WINDOWB_X_START);
+   set_reg_field_value(value, params->windowb_x_end,
+   CRTC_CRC0_WINDOWB_X_CONTROL,
+   CRTC_CRC0_WINDOWB_X_END);
+   dm_write_reg(tg->ctx, addr, value);
+
+   /* Window B y axis start and end. */
+   value = 0;
+   addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
+   set_reg_field_value(value, params->windowb_y_start,
+   CRTC_CRC0_WINDOWB_Y_CONTROL,
+   CRTC_CRC0_WINDOWB_Y_START);
+   set_reg_field_value(value, params->windowb_y_end,
+   CRTC_CRC0_WINDOWB_Y_CONTROL,
+   CRTC_CRC0_WINDOWB_Y_END);
+   dm_write_reg(tg->ctx, addr, value);
+
+   /* Set crc mode and selection, and enable. Only using CRC0*/
+   value = 0;
+   set_reg_field_value(value, params->continuous_mode ? 1 : 0,
+   CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
+   set_reg_field_value(value, params->selection,
+   CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
+   set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
+   dm_write_reg(tg->ctx, cntl_addr, value);
+
+   return true;
+}
+
+static bool dce110_get_crc(struct timing_generator *tg,
+  uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+{
+   uint32_t addr = 0;
+   uint32_t value = 0;
+   uint32_t field = 0;
+   struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+   addr = CRTC_REG(mmCRTC_CRC_CNTL);
+   value = dm_read_reg(tg->ctx, addr);
+   field = get_reg_field_value(value, CRTC_CRC_CNTL, CRTC_CRC_EN);
+
+   /* Early return if CRC is not enabled for this CRTC */
+   if (!field)
+   ret

[PATCH] drm/amd/powerplay: Fix braces around scalar initializer warning

2018-01-03 Thread Harry Wentland
This fixes the following two warnings:

drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c: In function 
‘phm_start_thermal_controller’:
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:230:9: warning: 
braces around scalar initializer
  struct PP_TemperatureRange range = {{TEMP_RANGE_MIN, TEMP_RANGE_MAX}};
   ^~~
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:230:9: note: 
(near initialization for ‘range.min’)
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:31:24: warning: 
excess elements in scalar initializer
 #define TEMP_RANGE_MAX (80 * 1000)

Signed-off-by: Harry Wentland 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 297ec0db2ef5..e23f63ea07cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -227,7 +227,7 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, 
const void *info)
 */
 int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
 {
-   struct PP_TemperatureRange range = {{TEMP_RANGE_MIN, TEMP_RANGE_MAX}};
+   struct PP_TemperatureRange range = {TEMP_RANGE_MIN, TEMP_RANGE_MAX};
 
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalController)
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/powerplay: Fix braces around scalar initializer warning

2018-01-03 Thread Deucher, Alexander
Reviewed-by: Alex Deucher 


From: amd-gfx  on behalf of Harry 
Wentland 
Sent: Wednesday, January 3, 2018 3:57 PM
To: amd-gfx@lists.freedesktop.org
Cc: Wentland, Harry
Subject: [PATCH] drm/amd/powerplay: Fix braces around scalar initializer warning

This fixes the following two warnings:

drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c: In function 
‘phm_start_thermal_controller’:
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:230:9: warning: 
braces around scalar initializer
  struct PP_TemperatureRange range = {{TEMP_RANGE_MIN, TEMP_RANGE_MAX}};
   ^~~
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:230:9: note: 
(near initialization for ‘range.min’)
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:31:24: warning: 
excess elements in scalar initializer
 #define TEMP_RANGE_MAX (80 * 1000)

Signed-off-by: Harry Wentland 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 297ec0db2ef5..e23f63ea07cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -227,7 +227,7 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, 
const void *info)
 */
 int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
 {
-   struct PP_TemperatureRange range = {{TEMP_RANGE_MIN, TEMP_RANGE_MAX}};
+   struct PP_TemperatureRange range = {TEMP_RANGE_MIN, TEMP_RANGE_MAX};

 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 PHM_PlatformCaps_ThermalController)
--
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Radeon rv250 GPU acceleration broken in 4.15-rc6

2018-01-03 Thread Meelis Roos
Because of PTI, I decided to test 4.15-rc6 on most of my test machines. 
And I happened to notice a radeon problem on RV250 era card with no 
monitor attached. 4.15-rc1 and earlier kernels did not show these 
errors; no kernels between rc1 and rc6 have been tested yet.

The messages in dmesg are strange, like 2 threads trying to poke the card 
instead of one? Or is it just trying AGP mode and then PCI mode and this 
double output is OK?

[4.777185] [drm] radeon kernel modesetting enabled.
[4.779418] [drm] initializing kernel modesetting (RV250 0x1002:0x4966 
0x1458:0x4010 0x01).
[4.779888] agpgart-amdk7 :00:00.0: AGP 2.0 bridge
[4.779963] agpgart-amdk7 :00:00.0: putting AGP V2 device into 4x mode
[4.780106] radeon :01:05.0: putting AGP V2 device into 4x mode
[4.780179] radeon :01:05.0: GTT: 512M 0xC000 - 0xDFFF
[4.780249] radeon :01:05.0: VRAM: 128M 0xE000 - 
0xE7FF (64M used)
[4.780344] [drm] Detected VRAM RAM=128M, BAR=128M
[4.780405] [drm] RAM width 128bits DDR
[4.780622] [TTM] Zone  kernel: Available graphics memory: 439294 kiB
[4.780686] [TTM] Zone highmem: Available graphics memory: 1032930 kiB
[4.780749] [TTM] Initializing pool allocator
[4.780898] [drm] radeon: 64M of VRAM memory ready
[4.780961] [drm] radeon: 512M of GTT memory ready.
[4.783068] radeon :01:05.0: WB disabled
[4.783166] radeon :01:05.0: fence driver on ring 0 use gpu addr 
0xc000 and cpu addr 0xf6a59461
[4.783261] [drm] Supports vblank timestamp caching Rev 2 (21.10.2013).
[4.783323] [drm] Driver supports precise vblank timestamp query.
[4.783447] [drm] radeon: irq initialized.
[4.783509] [drm] Loading R200 Microcode
[4.819844] [drm] radeon: ring at 0xC0001000
[4.923034] [drm:r100_ring_test [radeon]] *ERROR* radeon: ring test failed 
(scratch(0x15E4)=0xCAFEDEAD)
[4.923181] [drm:r100_cp_init [radeon]] *ERROR* radeon: cp isn't working 
(-22).
[4.923271] radeon :01:05.0: failed initializing CP (-22).
[4.923332] radeon :01:05.0: Disabling GPU acceleration
[5.025789] [drm:r100_cp_fini [radeon]] *ERROR* Wait for CP idle timeout, 
shutting down CP.
[5.026022] [drm] radeon: cp finalized
[5.026128] [drm] radeon: cp finalized
[5.026223] [TTM] Finalizing pool allocator
[5.027245] [TTM] Zone  kernel: Used memory at exit: 0 kiB
[5.027331] [TTM] Zone highmem: Used memory at exit: 0 kiB
[5.027397] [drm] radeon: ttm finalized
[5.027468] [drm] Forcing AGP to PCI mode
[5.027886] radeon :01:05.0: VRAM: 128M 0xE000 - 
0xE7FF (64M used)
[5.028577] radeon :01:05.0: GTT: 512M 0xC000 - 
0xDFFF
[5.028671] [drm] Detected VRAM RAM=128M, BAR=128M
[5.028730] [drm] RAM width 128bits DDR
[5.028942] [TTM] Zone  kernel: Available graphics memory: 439294 kiB
[5.029005] [TTM] Zone highmem: Available graphics memory: 1032930 kiB
[5.029066] [TTM] Initializing pool allocator
[5.029216] [drm] radeon: 64M of VRAM memory ready
[5.029277] [drm] radeon: 512M of GTT memory ready.
[5.029342] [drm] GART: num cpu pages 131072, num gpu pages 131072
[5.034222] [drm] PCI GART of 512M enabled (table at 0x3560).
[5.034393] radeon :01:05.0: WB disabled
[5.034465] radeon :01:05.0: fence driver on ring 0 use gpu addr 
0xc000 and cpu addr 0x516b4fc4
[5.034557] [drm] Supports vblank timestamp caching Rev 2 (21.10.2013).
[5.034618] [drm] Driver supports precise vblank timestamp query.
[5.034729] [drm] radeon: irq initialized.
[5.034984] [drm] radeon: ring at 0xC0001000
[5.138470] [drm:r100_ring_test [radeon]] *ERROR* radeon: ring test failed 
(scratch(0x15E4)=0xCAFEDEAD)
[5.138618] [drm:r100_cp_init [radeon]] *ERROR* radeon: cp isn't working 
(-22).
[5.138704] radeon :01:05.0: failed initializing CP (-22).
[5.138764] radeon :01:05.0: Disabling GPU acceleration
[5.241223] [drm:r100_cp_fini [radeon]] *ERROR* Wait for CP idle timeout, 
shutting down CP.
[5.241408] [drm] radeon: cp finalized
[5.243484] [drm] No TV DAC info found in BIOS
[5.243659] [drm] Radeon Display Connectors
[5.243719] [drm] Connector 0:
[5.243776] [drm]   VGA-1
[5.243833] [drm]   DDC: 0x60 0x60 0x60 0x60 0x60 0x60 0x60 0x60
[5.243894] [drm]   Encoders:
[5.243951] [drm] CRT1: INTERNAL_DAC1
[5.244064] [drm] Connector 1:
[5.244120] [drm]   DVI-I-1
[5.244176] [drm]   HPD1
[5.244231] [drm]   DDC: 0x64 0x64 0x64 0x64 0x64 0x64 0x64 0x64
[5.244291] [drm]   Encoders:
[5.244346] [drm] CRT2: INTERNAL_DAC2
[5.244404] [drm] DFP1: INTERNAL_TMDS1
[5.244460] [drm] Connector 2:
[5.244516] [drm]   SVIDEO-1
[5.244571] [drm]   Encoders:
[5.244626] [drm] TV1: INTERNAL_DAC2
[5.262348] [drm] Cannot find any crtc or sizes
[5.262491] [drm] Ini

Re: Radeon rv250 GPU acceleration broken in 4.15-rc6

2018-01-03 Thread Meelis Roos
> Beacuse of PTI, I decided to test 4.15-rc6 on most of my test machines. 
> And I happened to notice a radeon problem on RV250 era card with no 
> monitor attached. 4.15-rc1 and earlier kernels did not show these 
> errors, not kernels between rc1 and rc6 have been tested yet.

Sorry, I can not reproduce it, so it looks like random noise from an old 
computer. About 10 reboots later, I have not managed to reproduce this 
problem.

-- 
Meelis Roos (mr...@linux.ee)
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: Deadlocks with multiple applications on AMD RX 460 and RX 550 - Update 2

2018-01-03 Thread Luís Mendes
Hi Michel, Christian,

Michel, I have tested amd-staging-drm-next at commit "drm/amdgpu/gfx9:
only init the apertures used by KGD (v2)" -
0e4946409d11913523d30bc4830d10b388438c7a and the issues remain, both
on ARMv7 and on x86 amd64.

Christian, in fact if I replay the apitraces obtained on the ARMv7
platform on the AMD64 I am also able to reproduce the GPU hang! So it
is not ARM platform specific. Should I send/upload the apitraces? I
have two of them, typically when one doesn't hang the gpu the other
hangs. One takes about 1GB of disk space while the other takes 2.3GB.
...
[   69.019381] ISO 9660 Extensions: RRIP_1991A
[  213.292094] DMAR: DRHD: handling fault status reg 2
[  213.292102] DMAR: [INTR-REMAP] Request device [00:00.0] fault index
1c [fault reason 38] Blocked an interrupt request due to source-id
verification failure
[  223.406919] [drm:amdgpu_job_timedout [amdgpu]] *ERROR* ring gfx
timeout, last signaled seq=25158, last emitted seq=25160
[  223.406926] [drm] IP block:tonga_ih is hung!
[  223.407167] [drm] GPU recovery disabled.

Regards,
Luís


On Wed, Jan 3, 2018 at 5:47 PM, Luís Mendes  wrote:
> Hi Michel, Christian,
>
> Christian, I have followed your suggestion and I have just submitted a
> bug to fdo at https://bugs.freedesktop.org/show_bug.cgi?id=104481 -
> GPU lockup Polaris 11 - AMD RX 460 and RX 550 on amd64 and on ARMv7
> platforms while playing video.
>
> Michel, amdgpu.dc=0 seems to make no difference. I will try
> amd-staging-drm-next and report back.
>
> Regards,
> Luís
>
> On Wed, Jan 3, 2018 at 5:09 PM, Michel Dänzer  wrote:
>> On 2018-01-03 12:02 PM, Luís Mendes wrote:
>>>
>>> What I believe it seems to be the case is that the GPU lock up only
>>> happens when doing a page flip, since the kernel locks with:
>>> [  243.693200] kworker/u4:3D089  2 0x
>>> [  243.693232] Workqueue: events_unbound commit_work [drm_kms_helper]
>>> [  243.693251] [<80b8c6d4>] (__schedule) from [<80b8cdd0>] 
>>> (schedule+0x4c/0xac)
>>> [  243.693259] [<80b8cdd0>] (schedule) from [<80b91024>]
>>> (schedule_timeout+0x228/0x444)
>>> [  243.693270] [<80b91024>] (schedule_timeout) from [<80886738>]
>>> (dma_fence_default_wait+0x2b4/0x2d8)
>>> [  243.693276] [<80886738>] (dma_fence_default_wait) from [<80885d60>]
>>> (dma_fence_wait_timeout+0x40/0x150)
>>> [  243.693284] [<80885d60>] (dma_fence_wait_timeout) from [<80887b1c>]
>>> (reservation_object_wait_timeout_rcu+0xfc/0x34c)
>>> [  243.693509] [<80887b1c>] (reservation_object_wait_timeout_rcu) from
>>> [<7f331988>] (amdgpu_dm_do_flip+0xec/0x36c [amdgpu])
>>> [  243.693789] [<7f331988>] (amdgpu_dm_do_flip [amdgpu]) from
>>> [<7f33309c>] (amdgpu_dm_atomic_commit_tail+0xbfc/0xe58 [amdgpu])
>>> ...
>>
>> Does the problem also occur if you disable DC with amdgpu.dc=0 on the
>> kernel command line?
>>
>> Does it also happen with a kernel built from the amd-staging-drm-next
>> branch instead of drm-next-4.16?
>>
>>
>> --
>> Earthling Michel Dänzer   |   http://www.amd.com
>> Libre software enthusiast | Mesa and X developer
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: add high priority compute support for gfx9

2018-01-03 Thread Alex Deucher
On Tue, Jan 2, 2018 at 3:49 PM, Andres Rodriguez  wrote:
> We follow the same approach as gfx8. The only changes are register
> access macros.
>
> Tested on vega10. The execution latency results fall within the expected
> ranges from the polaris10 data.
>
> Signed-off-by: Andres Rodriguez 

Reviewed-by: Alex Deucher 

Do you want to send a patch to bump the driver version so you know
when this is available?

Alex

> ---
>  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 100 
> ++
>  1 file changed, 100 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
> b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> index 9f7be230734c..80af928b153e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> @@ -3734,6 +3734,105 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct 
> amdgpu_ring *ring)
> return wptr;
>  }
>
> +static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
> +  bool acquire)
> +{
> +   struct amdgpu_device *adev = ring->adev;
> +   int pipe_num, tmp, reg;
> +   int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 
> 0x1;
> +
> +   pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
> +
> +   /* first me only has 2 entries, GFX and HP3D */
> +   if (ring->me > 0)
> +   pipe_num -= 2;
> +
> +   reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
> +   tmp = RREG32(reg);
> +   tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, 
> pipe_percent);
> +   WREG32(reg, tmp);
> +}
> +
> +static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
> +   struct amdgpu_ring *ring,
> +   bool acquire)
> +{
> +   int i, pipe;
> +   bool reserve;
> +   struct amdgpu_ring *iring;
> +
> +   mutex_lock(&adev->gfx.pipe_reserve_mutex);
> +   pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
> +   if (acquire)
> +   set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
> +   else
> +   clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
> +
> +   if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, 
> AMDGPU_MAX_COMPUTE_QUEUES)) {
> +   /* Clear all reservations - everyone reacquires all resources 
> */
> +   for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
> +   gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
> +  true);
> +
> +   for (i = 0; i < adev->gfx.num_compute_rings; ++i)
> +   
> gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
> +  true);
> +   } else {
> +   /* Lower all pipes without a current reservation */
> +   for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
> +   iring = &adev->gfx.gfx_ring[i];
> +   pipe = amdgpu_gfx_queue_to_bit(adev,
> +  iring->me,
> +  iring->pipe,
> +  0);
> +   reserve = test_bit(pipe, 
> adev->gfx.pipe_reserve_bitmap);
> +   gfx_v9_0_ring_set_pipe_percent(iring, reserve);
> +   }
> +
> +   for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
> +   iring = &adev->gfx.compute_ring[i];
> +   pipe = amdgpu_gfx_queue_to_bit(adev,
> +  iring->me,
> +  iring->pipe,
> +  0);
> +   reserve = test_bit(pipe, 
> adev->gfx.pipe_reserve_bitmap);
> +   gfx_v9_0_ring_set_pipe_percent(iring, reserve);
> +   }
> +   }
> +
> +   mutex_unlock(&adev->gfx.pipe_reserve_mutex);
> +}
> +
> +static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
> + struct amdgpu_ring *ring,
> + bool acquire)
> +{
> +   uint32_t pipe_priority = acquire ? 0x2 : 0x0;
> +   uint32_t queue_priority = acquire ? 0xf : 0x0;
> +
> +   mutex_lock(&adev->srbm_mutex);
> +   soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
> +
> +   WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
> +   WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
> +
> +   soc15_grbm_select(adev, 0, 0, 0, 0);
> +   mutex_unlock(&adev->srbm_mutex);
> +}
> +
> +static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
> +  enum drm_sched_priority 

RE: [PATCH] drm/amdgpu: optimize moved handling only when vm_debug is inactive

2018-01-03 Thread He, Roger
Fix my concern as well.

Reviewed-by: Roger He 

Thanks
Roger(Hongbo.He)
-Original Message-
From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of 
Christian K?nig
Sent: Wednesday, January 03, 2018 8:37 PM
To: amd-gfx@lists.freedesktop.org
Subject: [PATCH] drm/amdgpu: optimize moved handling only when vm_debug is 
inactive

Otherwise we would completely circumvent that debugging feature.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 81505870eebc..cd1752b6afa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1685,7 +1685,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
if (resv == vm->root.base.bo->tbo.resv)
clear = false;
/* Try to reserve the BO to avoid clearing its ptes */
-   else if (reservation_object_trylock(resv))
+   else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
clear = false;
/* Somebody else is using the BO right now */
else
-- 
2.11.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 5/7] drm/amd/powerplay: export vega10 specific thermal ranges

2018-01-03 Thread Quan, Evan
Not typo. It shares the SMU7 structure.

Regards,
Evan
>-Original Message-
>From: Alex Deucher [mailto:alexdeuc...@gmail.com]
>Sent: Thursday, January 04, 2018 4:18 AM
>To: Quan, Evan 
>Cc: amd-gfx list ; Deucher, Alexander
>
>Subject: Re: [PATCH 5/7] drm/amd/powerplay: export vega10 specific thermal 
>ranges
>
>On Wed, Jan 3, 2018 at 3:16 PM, Alex Deucher  wrote:
>> On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
>>> Change-Id: If240a45dd2538e93185802b1fce54fd83de89de0
>>> Signed-off-by: Evan Quan 
>>> ---
>>>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 21
>+
>>>  1 file changed, 21 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> index 2d55dab..ed16468 100644
>>> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> @@ -4988,6 +4988,26 @@ static int vega10_notify_cac_buffer_info(struct 
>>> pp_hwmgr
>*hwmgr,
>>> return 0;
>>>  }
>>>
>>> +static struct PP_TemperatureRange SMU7ThermalWithDelayPolicy[] =
>>
>> Copy paste typo?  s/SMU7/vega10/?
>
>With that fixed:
>Reviewed-by: Alex Deucher 
>
>>
>> Alex
>>
>>> +{
>>> +   {-273150,  99000},
>>> +   { 12, 12},
>>> +};
>>> +
>>> +static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
>>> +   struct PP_TemperatureRange *thermal_data)
>>> +{
>>> +   struct phm_ppt_v2_information *table_info =
>>> +   (struct phm_ppt_v2_information *)hwmgr->pptable;
>>> +
>>> +   memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct
>PP_TemperatureRange));
>>> +
>>> +   thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
>>> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
>>> +
>>> +   return 0;
>>> +}
>>> +
>>>  static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
>>> const void *info)
>>>  {
>>> @@ -5074,6 +5094,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs 
>>> = {
>>> .set_mclk_od = vega10_set_mclk_od,
>>> .avfs_control = vega10_avfs_enable,
>>> .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
>>> +   .get_thermal_temperature_range = 
>>> vega10_get_thermal_temperature_range,
>>> .register_internal_thermal_interrupt = 
>>> vega10_register_thermal_interrupt,
>>> .start_thermal_controller = vega10_start_thermal_controller,
>>>  };
>>> --
>>> 2.7.4
>>>
>>> ___
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 5/7] drm/amd/powerplay: export vega10 specific thermal ranges

2018-01-03 Thread Alex Deucher
On Wed, Jan 3, 2018 at 9:25 PM, Quan, Evan  wrote:
> Not typo. It shares the SMU7 structure.

If all of these parts use the same values, can we just add it to some
common place?  Also, all of these can be const I think.

Alex

>
> Regards,
> Evan
>>-Original Message-
>>From: Alex Deucher [mailto:alexdeuc...@gmail.com]
>>Sent: Thursday, January 04, 2018 4:18 AM
>>To: Quan, Evan 
>>Cc: amd-gfx list ; Deucher, Alexander
>>
>>Subject: Re: [PATCH 5/7] drm/amd/powerplay: export vega10 specific thermal 
>>ranges
>>
>>On Wed, Jan 3, 2018 at 3:16 PM, Alex Deucher  wrote:
>>> On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
 Change-Id: If240a45dd2538e93185802b1fce54fd83de89de0
 Signed-off-by: Evan Quan 
 ---
  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 21
>>+
  1 file changed, 21 insertions(+)

 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
 index 2d55dab..ed16468 100644
 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
 @@ -4988,6 +4988,26 @@ static int vega10_notify_cac_buffer_info(struct 
 pp_hwmgr
>>*hwmgr,
 return 0;
  }

 +static struct PP_TemperatureRange SMU7ThermalWithDelayPolicy[] =
>>>
>>> Copy paste typo?  s/SMU7/vega10/?
>>
>>With that fixed:
>>Reviewed-by: Alex Deucher 
>>
>>>
>>> Alex
>>>
 +{
 +   {-273150,  99000},
 +   { 12, 12},
 +};
 +
 +static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
 +   struct PP_TemperatureRange *thermal_data)
 +{
 +   struct phm_ppt_v2_information *table_info =
 +   (struct phm_ppt_v2_information *)hwmgr->pptable;
 +
 +   memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct
>>PP_TemperatureRange));
 +
 +   thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
 +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +
 +   return 0;
 +}
 +
  static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
 const void *info)
  {
 @@ -5074,6 +5094,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs 
 = {
 .set_mclk_od = vega10_set_mclk_od,
 .avfs_control = vega10_avfs_enable,
 .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
 +   .get_thermal_temperature_range = 
 vega10_get_thermal_temperature_range,
 .register_internal_thermal_interrupt = 
 vega10_register_thermal_interrupt,
 .start_thermal_controller = vega10_start_thermal_controller,
  };
 --
 2.7.4

 ___
 amd-gfx mailing list
 amd-gfx@lists.freedesktop.org
 https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/powerplay: fix compile warning

2018-01-03 Thread Evan Quan
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c: In
function 'phm_start_thermal_controller':
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:231:9:
warning: braces around scalar initializer [enabled by default]
  struct PP_TemperatureRange range = {{TEMP_RANGE_MIN, TEMP_RANGE_MAX}};
 ^
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:231:9:
warning: (near initialization for 'range.min') [enabled by default]
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:231:9:
warning: excess elements in scalar initializer [enabled by default]
drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:231:9:
warning: (near initialization for 'range.min') [enabled by default]

Change-Id: I321ef2f0c5fe3ff27c0414145b53c0e0250bf837
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 84e1cb0..fdd2c05 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -228,7 +228,7 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, 
const void *info)
 int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
 {
int ret = 0;
-   struct PP_TemperatureRange range = {{TEMP_RANGE_MIN, TEMP_RANGE_MAX}};
+   struct PP_TemperatureRange range = {TEMP_RANGE_MIN, TEMP_RANGE_MAX};
 
if (hwmgr->hwmgr_func->get_thermal_temperature_range)
hwmgr->hwmgr_func->get_thermal_temperature_range(
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amd/powerplay: fix compile warning

2018-01-03 Thread Quan, Evan
Please ignore this patch since I found there is already a fix for this.

Regards,
Evan
>-Original Message-
>From: Evan Quan [mailto:evan.q...@amd.com]
>Sent: Thursday, January 04, 2018 10:55 AM
>To: amd-gfx@lists.freedesktop.org
>Cc: Quan, Evan 
>Subject: [PATCH] drm/amd/powerplay: fix compile warning
>
>drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c: In
>function 'phm_start_thermal_controller':
>drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:231:9:
>warning: braces around scalar initializer [enabled by default]
>  struct PP_TemperatureRange range = {{TEMP_RANGE_MIN, TEMP_RANGE_MAX}};
> ^
>drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:231:9:
>warning: (near initialization for 'range.min') [enabled by default]
>drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:231:9:
>warning: excess elements in scalar initializer [enabled by default]
>drivers/gpu/drm/amd/amdgpu/../powerplay/hwmgr/hardwaremanager.c:231:9:
>warning: (near initialization for 'range.min') [enabled by default]
>
>Change-Id: I321ef2f0c5fe3ff27c0414145b53c0e0250bf837
>Signed-off-by: Evan Quan 
>---
> drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
>diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
>b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
>index 84e1cb0..fdd2c05 100644
>--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
>+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
>@@ -228,7 +228,7 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
>const void *info)
> int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
> {
>   int ret = 0;
>-  struct PP_TemperatureRange range = {{TEMP_RANGE_MIN, TEMP_RANGE_MAX}};
>+  struct PP_TemperatureRange range = {TEMP_RANGE_MIN, TEMP_RANGE_MAX};
>
>   if (hwmgr->hwmgr_func->get_thermal_temperature_range)
>   hwmgr->hwmgr_func->get_thermal_temperature_range(
>--
>2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: add high priority compute support for gfx9

2018-01-03 Thread Andres Rodriguez



On 2018-01-03 06:29 PM, Alex Deucher wrote:

On Tue, Jan 2, 2018 at 3:49 PM, Andres Rodriguez  wrote:

We follow the same approach as gfx8. The only changes are register
access macros.

Tested on vega10. The execution latency results fall within the expected
ranges from the polaris10 data.

Signed-off-by: Andres Rodriguez 


Reviewed-by: Alex Deucher 


Thanks, and a happy new year to you :)


Do you want to send a patch to bump the driver version so you know
when this is available?



That would be perfect. I'll send it out in the morning.

Regards,
Andres


Alex


---
  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 100 ++
  1 file changed, 100 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9f7be230734c..80af928b153e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3734,6 +3734,105 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct 
amdgpu_ring *ring)
 return wptr;
  }

+static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
+  bool acquire)
+{
+   struct amdgpu_device *adev = ring->adev;
+   int pipe_num, tmp, reg;
+   int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
+
+   pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
+
+   /* first me only has 2 entries, GFX and HP3D */
+   if (ring->me > 0)
+   pipe_num -= 2;
+
+   reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
+   tmp = RREG32(reg);
+   tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
+   WREG32(reg, tmp);
+}
+
+static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
+   struct amdgpu_ring *ring,
+   bool acquire)
+{
+   int i, pipe;
+   bool reserve;
+   struct amdgpu_ring *iring;
+
+   mutex_lock(&adev->gfx.pipe_reserve_mutex);
+   pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
+   if (acquire)
+   set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+   else
+   clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+
+   if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, 
AMDGPU_MAX_COMPUTE_QUEUES)) {
+   /* Clear all reservations - everyone reacquires all resources */
+   for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
+   gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
+  true);
+
+   for (i = 0; i < adev->gfx.num_compute_rings; ++i)
+   
gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
+  true);
+   } else {
+   /* Lower all pipes without a current reservation */
+   for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+   iring = &adev->gfx.gfx_ring[i];
+   pipe = amdgpu_gfx_queue_to_bit(adev,
+  iring->me,
+  iring->pipe,
+  0);
+   reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+   gfx_v9_0_ring_set_pipe_percent(iring, reserve);
+   }
+
+   for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+   iring = &adev->gfx.compute_ring[i];
+   pipe = amdgpu_gfx_queue_to_bit(adev,
+  iring->me,
+  iring->pipe,
+  0);
+   reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+   gfx_v9_0_ring_set_pipe_percent(iring, reserve);
+   }
+   }
+
+   mutex_unlock(&adev->gfx.pipe_reserve_mutex);
+}
+
+static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+   uint32_t pipe_priority = acquire ? 0x2 : 0x0;
+   uint32_t queue_priority = acquire ? 0xf : 0x0;
+
+   mutex_lock(&adev->srbm_mutex);
+   soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+   WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
+   WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
+
+   soc15_grbm_select(adev, 0, 0, 0, 0);
+   mutex_unlock(&adev->srbm_mutex);
+}
+
+static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
+  enum drm_sched_priority priority)
+{
+   struct amdgpu_device *adev = ring->adev;
+

RE: [PATCH] drm/amd/powerplay: fix memory leakage when reload

2018-01-03 Thread Tao, Yintian
Hi Alex

Thanks a lot. I got it.

Best Regards
Yintian Tao

From: Deucher, Alexander
Sent: Wednesday, January 03, 2018 10:32 PM
To: Tao, Yintian ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amd/powerplay: fix memory leakage when reload


Did you see my reply yesterday?  I reviewed it.  I also think we need to fix up 
cz, rv, and vg10.


From: Tao, Yintian
Sent: Tuesday, January 2, 2018 9:22:23 PM
To: Tao, Yintian; 
amd-gfx@lists.freedesktop.org; Deucher, 
Alexander
Subject: RE: [PATCH] drm/amd/powerplay: fix memory leakage when reload

Add Alex

-Original Message-
From: Yintian Tao [mailto:yt...@amd.com]
Sent: Monday, January 01, 2018 11:16 AM
To: amd-gfx@lists.freedesktop.org
Cc: Tao, Yintian mailto:yintian@amd.com>>
Subject: [PATCH] drm/amd/powerplay: fix memory leakage when reload

add smu_free_memory when smu fini to prevent memory leakage

Change-Id: Id9103d8b54869b63f22a9af53d9fbc3b7a221191
Signed-off-by: Yintian Tao mailto:yt...@amd.com>>
---
 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c 
b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index c49a6f2..925217e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -607,6 +607,12 @@ int smu7_init(struct pp_smumgr *smumgr)

 int smu7_smu_fini(struct pp_smumgr *smumgr)  {
+   struct smu7_smumgr *smu_data = (struct smu7_smumgr
+*)(smumgr->backend);
+
+   smu_free_memory(smumgr->device, smu_data->header_buffer.handle);
+   if (!cgs_is_virtualization_enabled(smumgr->device))
+   smu_free_memory(smumgr->device, smu_data->smu_buffer.handle);
+
 if (smumgr->backend) {
 kfree(smumgr->backend);
 smumgr->backend = NULL;
--
2.7.4
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/pp: Implement get_max_high_clocks for CI/VI

2018-01-03 Thread Rex Zhu
DC component expect PP to give max engine clock and
memory clock through pp_get_display_mode_validation_clocks
on DGPU as well.

This patch can fix MultiGPU-Display blank
out with 1 IGPU-4k display and 2 DGPU-two 4K
displays.

Change-Id: I20454060ebe01955c5653de037dd8c09a576026a
Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 17 +
 1 file changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 444cc35..e0438dd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4667,6 +4667,22 @@ static int smu7_notify_cac_buffer_info(struct pp_hwmgr 
*hwmgr,
return 0;
 }
 
+static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
+   struct amd_pp_simple_clock_info *clocks)
+{
+   struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+   struct smu7_single_dpm_table *sclk_table = 
&(data->dpm_table.sclk_table);
+   struct smu7_single_dpm_table *mclk_table = 
&(data->dpm_table.mclk_table);
+
+   if (clocks == NULL)
+   return -EINVAL;
+
+   clocks->memory_max_clock = 
mclk_table->dpm_levels[mclk_table->count-1].value;
+   clocks->engine_max_clock = 
sclk_table->dpm_levels[sclk_table->count-1].value;
+
+   return 0;
+}
+
 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.backend_init = &smu7_hwmgr_backend_init,
.backend_fini = &smu7_hwmgr_backend_fini,
@@ -4719,6 +4735,7 @@ static int smu7_notify_cac_buffer_info(struct pp_hwmgr 
*hwmgr,
.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
.start_thermal_controller = smu7_start_thermal_controller,
.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
+   .get_max_high_clocks = smu7_get_max_high_clocks,
 };
 
 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 3/3] drm/amd/powerplay: export the thermal ranges of Carrizo (V2)

2018-01-03 Thread Evan Quan
V2: reuse the SMUThermal structure defined in pp_thermal.h

Change-Id: I861e3e6d4ec553171cbf369eca4ac9d834478290
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index b314d09..0b8aa44 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -38,6 +38,7 @@
 #include "cz_hwmgr.h"
 #include "power_state.h"
 #include "cz_clockpowergating.h"
+#include "pp_thermal.h"
 
 #define ixSMUSVI_NB_CURRENTVID 0xD8230044
 #define CURRENT_NB_VID_MASK 0xff00
@@ -1858,6 +1859,19 @@ static int cz_notify_cac_buffer_info(struct pp_hwmgr 
*hwmgr,
return 0;
 }
 
+static int cz_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+   struct PP_TemperatureRange *thermal_data)
+{
+   struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+   memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct 
PP_TemperatureRange));
+
+   thermal_data->max = (cz_hwmgr->thermal_auto_throttling_treshold +
+   cz_hwmgr->sys_info.htc_hyst_lmt) *
+   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+   return 0;
+}
 
 static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.backend_init = cz_hwmgr_backend_init,
@@ -1890,6 +1904,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.power_state_set = cz_set_power_state_tasks,
.dynamic_state_management_disable = cz_disable_dpm_tasks,
.notify_cac_buffer_info = cz_notify_cac_buffer_info,
+   .get_thermal_temperature_range = cz_get_thermal_temperature_range,
 };
 
 int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/3] drm/amd/powerplay: export the thermal ranges of VI asics (V2)

2018-01-03 Thread Evan Quan
V2: move the SMU7Thermal structure to newly created header file

Change-Id: I569179443c73c793153d5c499dd2f203f89e3ca2
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 21 +
 drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h   |  6 ++
 2 files changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 8d7fd06..df25d91 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,7 @@
 #include "smu7_thermal.h"
 #include "smu7_clockpowergating.h"
 #include "processpptables.h"
+#include "pp_thermal.h"
 
 #define MC_CG_ARB_FREQ_F0   0x0a
 #define MC_CG_ARB_FREQ_F1   0x0b
@@ -4655,6 +4656,25 @@ static int smu7_notify_cac_buffer_info(struct pp_hwmgr 
*hwmgr,
return 0;
 }
 
+static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+   struct PP_TemperatureRange *thermal_data)
+{
+   struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+   struct phm_ppt_v1_information *table_info =
+   (struct phm_ppt_v1_information *)hwmgr->pptable;
+
+   memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct 
PP_TemperatureRange));
+
+   if (hwmgr->pp_table_version == PP_TABLE_V1)
+   thermal_data->max = 
table_info->cac_dtp_table->usSoftwareShutdownTemp *
+   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+   else if (hwmgr->pp_table_version == PP_TABLE_V0)
+   thermal_data->max = 
data->thermal_temp_setting.temperature_shutdown *
+   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+   return 0;
+}
+
 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.backend_init = &smu7_hwmgr_backend_init,
.backend_fini = &smu7_hwmgr_backend_fini,
@@ -4707,6 +4727,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
.start_thermal_controller = smu7_start_thermal_controller,
.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
+   .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
 };
 
 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h 
b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
index 82b810a..201d2b6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
@@ -31,4 +31,10 @@ static const struct PP_TemperatureRange 
SMU7ThermalWithDelayPolicy[] =
{ 12, 12},
 };
 
+static const struct PP_TemperatureRange SMU7ThermalPolicy[] =
+{
+   {-273150,  99000},
+   { 12, 12},
+};
+
 #endif
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 5/7] drm/amd/powerplay: export vega10 specific thermal ranges

2018-01-03 Thread Quan, Evan
Created a new header file to place the shared structures.
Please check the coming V2 patches.

Regards,
Evan
>-Original Message-
>From: Alex Deucher [mailto:alexdeuc...@gmail.com]
>Sent: Thursday, January 04, 2018 10:53 AM
>To: Quan, Evan 
>Cc: amd-gfx list ; Deucher, Alexander
>
>Subject: Re: [PATCH 5/7] drm/amd/powerplay: export vega10 specific thermal 
>ranges
>
>On Wed, Jan 3, 2018 at 9:25 PM, Quan, Evan  wrote:
>> Not typo. It shares the SMU7 structure.
>
>If all of these parts use the same values, can we just add it to some
>common place?  Also, all of these can be const I think.
>
>Alex
>
>>
>> Regards,
>> Evan
>>>-Original Message-
>>>From: Alex Deucher [mailto:alexdeuc...@gmail.com]
>>>Sent: Thursday, January 04, 2018 4:18 AM
>>>To: Quan, Evan 
>>>Cc: amd-gfx list ; Deucher, Alexander
>>>
>>>Subject: Re: [PATCH 5/7] drm/amd/powerplay: export vega10 specific thermal 
>>>ranges
>>>
>>>On Wed, Jan 3, 2018 at 3:16 PM, Alex Deucher  wrote:
 On Tue, Jan 2, 2018 at 10:15 PM, Evan Quan  wrote:
> Change-Id: If240a45dd2538e93185802b1fce54fd83de89de0
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 21
>>>+
>  1 file changed, 21 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>>b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> index 2d55dab..ed16468 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> @@ -4988,6 +4988,26 @@ static int vega10_notify_cac_buffer_info(struct
>pp_hwmgr
>>>*hwmgr,
> return 0;
>  }
>
> +static struct PP_TemperatureRange SMU7ThermalWithDelayPolicy[] =

 Copy paste typo?  s/SMU7/vega10/?
>>>
>>>With that fixed:
>>>Reviewed-by: Alex Deucher 
>>>

 Alex

> +{
> +   {-273150,  99000},
> +   { 12, 12},
> +};
> +
> +static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
> +   struct PP_TemperatureRange *thermal_data)
> +{
> +   struct phm_ppt_v2_information *table_info =
> +   (struct phm_ppt_v2_information *)hwmgr->pptable;
> +
> +   memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct
>>>PP_TemperatureRange));
> +
> +   thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp 
> *
> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> +
> +   return 0;
> +}
> +
>  static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
> const void *info)
>  {
> @@ -5074,6 +5094,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs
>= {
> .set_mclk_od = vega10_set_mclk_od,
> .avfs_control = vega10_avfs_enable,
> .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
> +   .get_thermal_temperature_range = 
> vega10_get_thermal_temperature_range,
> .register_internal_thermal_interrupt = 
> vega10_register_thermal_interrupt,
> .start_thermal_controller = vega10_start_thermal_controller,
>  };
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/3] drm/amd/powerplay: export the thermal ranges of VI asics (V2)

2018-01-03 Thread Alex Deucher
On Wed, Jan 3, 2018 at 11:17 PM, Evan Quan  wrote:
> V2: move the SMU7Thermal structure to newly created header file
>
> Change-Id: I569179443c73c793153d5c499dd2f203f89e3ca2
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 21 +
>  drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h   |  6 ++
>  2 files changed, 27 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> index 8d7fd06..df25d91 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> @@ -48,6 +48,7 @@
>  #include "smu7_thermal.h"
>  #include "smu7_clockpowergating.h"
>  #include "processpptables.h"
> +#include "pp_thermal.h"
>
>  #define MC_CG_ARB_FREQ_F0   0x0a
>  #define MC_CG_ARB_FREQ_F1   0x0b
> @@ -4655,6 +4656,25 @@ static int smu7_notify_cac_buffer_info(struct pp_hwmgr 
> *hwmgr,
> return 0;
>  }
>
> +static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
> +   struct PP_TemperatureRange *thermal_data)
> +{
> +   struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
> +   struct phm_ppt_v1_information *table_info =
> +   (struct phm_ppt_v1_information *)hwmgr->pptable;
> +
> +   memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct 
> PP_TemperatureRange));
> +
> +   if (hwmgr->pp_table_version == PP_TABLE_V1)
> +   thermal_data->max = 
> table_info->cac_dtp_table->usSoftwareShutdownTemp *
> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> +   else if (hwmgr->pp_table_version == PP_TABLE_V0)
> +   thermal_data->max = 
> data->thermal_temp_setting.temperature_shutdown *
> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> +
> +   return 0;
> +}
> +
>  static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
> .backend_init = &smu7_hwmgr_backend_init,
> .backend_fini = &smu7_hwmgr_backend_fini,
> @@ -4707,6 +4727,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
> .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
> .start_thermal_controller = smu7_start_thermal_controller,
> .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
> +   .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
>  };
>
>  uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h 
> b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
> index 82b810a..201d2b6 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
> @@ -31,4 +31,10 @@ static const struct PP_TemperatureRange 
> SMU7ThermalWithDelayPolicy[] =
> { 12, 12},
>  };
>
> +static const struct PP_TemperatureRange SMU7ThermalPolicy[] =
> +{
> +   {-273150,  99000},
> +   { 12, 12},
> +};

This is identical to the other temp range you added in patch 1.

Alex

> +
>  #endif
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 3/3] drm/amd/powerplay: export the thermal ranges of Carrizo (V2)

2018-01-03 Thread Alex Deucher
On Wed, Jan 3, 2018 at 11:17 PM, Evan Quan  wrote:
> V2: reuse the SMUThermal structure defined in pp_thermal.h
>
> Change-Id: I861e3e6d4ec553171cbf369eca4ac9d834478290
> Signed-off-by: Evan Quan 

Series is:
Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 15 +++
>  1 file changed, 15 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> index b314d09..0b8aa44 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> @@ -38,6 +38,7 @@
>  #include "cz_hwmgr.h"
>  #include "power_state.h"
>  #include "cz_clockpowergating.h"
> +#include "pp_thermal.h"
>
>  #define ixSMUSVI_NB_CURRENTVID 0xD8230044
>  #define CURRENT_NB_VID_MASK 0xff00
> @@ -1858,6 +1859,19 @@ static int cz_notify_cac_buffer_info(struct pp_hwmgr 
> *hwmgr,
> return 0;
>  }
>
> +static int cz_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
> +   struct PP_TemperatureRange *thermal_data)
> +{
> +   struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
> +
> +   memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct 
> PP_TemperatureRange));
> +
> +   thermal_data->max = (cz_hwmgr->thermal_auto_throttling_treshold +
> +   cz_hwmgr->sys_info.htc_hyst_lmt) *
> +   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
> +
> +   return 0;
> +}
>
>  static const struct pp_hwmgr_func cz_hwmgr_funcs = {
> .backend_init = cz_hwmgr_backend_init,
> @@ -1890,6 +1904,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
> .power_state_set = cz_set_power_state_tasks,
> .dynamic_state_management_disable = cz_disable_dpm_tasks,
> .notify_cac_buffer_info = cz_notify_cac_buffer_info,
> +   .get_thermal_temperature_range = cz_get_thermal_temperature_range,
>  };
>
>  int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/3] drm/amd/powerplay: export vega10 specific thermal ranges (V2)

2018-01-03 Thread Evan Quan
V2: new header file to hold the common SMU7Thermal structure

Change-Id: If240a45dd2538e93185802b1fce54fd83de89de0
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 16 ++
 drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h | 34 ++
 2 files changed, 50 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 2d55dab..455becd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -49,6 +49,7 @@
 #include "cgs_linux.h"
 #include "ppinterrupt.h"
 #include "pp_overdriver.h"
+#include "pp_thermal.h"
 
 #define VOLTAGE_SCALE  4
 #define VOLTAGE_VID_OFFSET_SCALE1   625
@@ -4988,6 +4989,20 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr 
*hwmgr,
return 0;
 }
 
+static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+   struct PP_TemperatureRange *thermal_data)
+{
+   struct phm_ppt_v2_information *table_info =
+   (struct phm_ppt_v2_information *)hwmgr->pptable;
+
+   memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct 
PP_TemperatureRange));
+
+   thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
+   PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+   return 0;
+}
+
 static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
const void *info)
 {
@@ -5074,6 +5089,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.set_mclk_od = vega10_set_mclk_od,
.avfs_control = vega10_avfs_enable,
.notify_cac_buffer_info = vega10_notify_cac_buffer_info,
+   .get_thermal_temperature_range = vega10_get_thermal_temperature_range,
.register_internal_thermal_interrupt = 
vega10_register_thermal_interrupt,
.start_thermal_controller = vega10_start_thermal_controller,
 };
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h 
b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
new file mode 100644
index 000..82b810a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef PP_THERMAL_H
+#define PP_THERMAL_H
+
+#include "power_state.h"
+
+static const struct PP_TemperatureRange SMU7ThermalWithDelayPolicy[] =
+{
+   {-273150,  99000},
+   { 12, 12},
+};
+
+#endif
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH libdrm] amdgpu: fix not to add amdgpu.ids when building without amdgpu

2018-01-03 Thread Seung-Woo Kim
The amdgpu.ids is only required when building with amdgpu support.
Fix not to add it without amdgpu.

Signed-off-by: Seung-Woo Kim 
---
 data/Makefile.am |2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/data/Makefile.am b/data/Makefile.am
index eba915d..897a7f3 100644
--- a/data/Makefile.am
+++ b/data/Makefile.am
@@ -20,4 +20,6 @@
 #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 libdrmdatadir = @libdrmdatadir@
+if HAVE_AMDGPU
 dist_libdrmdata_DATA = amdgpu.ids
+endif
-- 
1.7.4.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH v2] drm/amd/pp: Implement get_max_high_clocks for CI/VI

2018-01-03 Thread Rex Zhu
v2: add table length check.

DC component expect PP to give max engine clock and
memory clock through pp_get_display_mode_validation_clocks
on DGPU as well.

This patch can fix MultiGPU-Display blank
out with 1 IGPU-4k display and 2 DGPU-two 4K
displays.

Change-Id: I20454060ebe01955c5653de037dd8c09a576026a
Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 21 +
 1 file changed, 21 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 444cc35..f4ada46 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4667,6 +4667,26 @@ static int smu7_notify_cac_buffer_info(struct pp_hwmgr 
*hwmgr,
return 0;
 }
 
+static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
+   struct amd_pp_simple_clock_info *clocks)
+{
+   struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+   struct smu7_single_dpm_table *sclk_table = 
&(data->dpm_table.sclk_table);
+   struct smu7_single_dpm_table *mclk_table = 
&(data->dpm_table.mclk_table);
+
+   if (clocks == NULL)
+   return -EINVAL;
+
+   clocks->memory_max_clock = mclk_table->count > 1 ?
+   
mclk_table->dpm_levels[mclk_table->count-1].value :
+   mclk_table->dpm_levels[0].value;
+   clocks->engine_max_clock = sclk_table->count > 1 ?
+   
sclk_table->dpm_levels[sclk_table->count-1].value :
+   sclk_table->dpm_levels[0].value;
+
+   return 0;
+}
+
 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.backend_init = &smu7_hwmgr_backend_init,
.backend_fini = &smu7_hwmgr_backend_fini,
@@ -4719,6 +4739,7 @@ static int smu7_notify_cac_buffer_info(struct pp_hwmgr 
*hwmgr,
.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
.start_thermal_controller = smu7_start_thermal_controller,
.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
+   .get_max_high_clocks = smu7_get_max_high_clocks,
 };
 
 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2] drm/amd/pp: Implement get_max_high_clocks for CI/VI

2018-01-03 Thread Alex Deucher
On Thu, Jan 4, 2018 at 1:01 AM, Rex Zhu  wrote:
> v2: add table length check.
>
> DC component expect PP to give max engine clock and
> memory clock through pp_get_display_mode_validation_clocks
> on DGPU as well.
>
> This patch can fix MultiGPU-Display blank
> out with 1 IGPU-4k display and 2 DGPU-two 4K
> displays.
>
> Change-Id: I20454060ebe01955c5653de037dd8c09a576026a
> Signed-off-by: Rex Zhu 
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 21 +
>  1 file changed, 21 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> index 444cc35..f4ada46 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> @@ -4667,6 +4667,26 @@ static int smu7_notify_cac_buffer_info(struct pp_hwmgr 
> *hwmgr,
> return 0;
>  }
>
> +static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
> +   struct amd_pp_simple_clock_info 
> *clocks)
> +{
> +   struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
> +   struct smu7_single_dpm_table *sclk_table = 
> &(data->dpm_table.sclk_table);
> +   struct smu7_single_dpm_table *mclk_table = 
> &(data->dpm_table.mclk_table);
> +
> +   if (clocks == NULL)

Is it possible that data or sclk or mclk table could be null here?
With that addressed:
Reviewed-by: Alex Deucher 

Looks like vega10 is also missing this callback.

Alex

> +   return -EINVAL;
> +
> +   clocks->memory_max_clock = mclk_table->count > 1 ?
> +   
> mclk_table->dpm_levels[mclk_table->count-1].value :
> +   mclk_table->dpm_levels[0].value;
> +   clocks->engine_max_clock = sclk_table->count > 1 ?
> +   
> sclk_table->dpm_levels[sclk_table->count-1].value :
> +   sclk_table->dpm_levels[0].value;
> +
> +   return 0;
> +}
> +
>  static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
> .backend_init = &smu7_hwmgr_backend_init,
> .backend_fini = &smu7_hwmgr_backend_fini,
> @@ -4719,6 +4739,7 @@ static int smu7_notify_cac_buffer_info(struct pp_hwmgr 
> *hwmgr,
> .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
> .start_thermal_controller = smu7_start_thermal_controller,
> .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
> +   .get_max_high_clocks = smu7_get_max_high_clocks,
>  };
>
>  uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
> --
> 1.9.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx