Re: [PATCH v2 2/8] drm/ttm: Remap all page faults to per process dummy page.

2020-11-10 Thread Andrey Grodzovsky



On 6/22/20 5:41 AM, Daniel Vetter wrote:

On Sun, Jun 21, 2020 at 02:03:02AM -0400, Andrey Grodzovsky wrote:

On device removal reroute all CPU mappings to dummy page per drm_file
instance or imported GEM object.

Signed-off-by: Andrey Grodzovsky 
---
  drivers/gpu/drm/ttm/ttm_bo_vm.c | 65 -
  1 file changed, 57 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 389128b..2f8bf5e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -35,6 +35,8 @@
  #include 
  #include 
  #include 
+#include 
+#include 
  #include 
  #include 
  #include 
@@ -328,19 +330,66 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)

Hm I think diff and code flow look a bit bad now. What about renaming the
current function to __ttm_bo_vm_fault and then having something like the
below:

ttm_bo_vm_fault(args) {

if (drm_dev_enter()) {
__ttm_bo_vm_fault(args);
drm_dev_exit();
} else  {
drm_gem_insert_dummy_pfn();
}
}

I think drm_gem_insert_dummy_pfn(); should be portable across drivers, so
another nice point to try to unify drivers as much as possible.
-Daniel


pgprot_t prot;
struct ttm_buffer_object *bo = vma->vm_private_data;
vm_fault_t ret;
+   int idx;
+   struct drm_device *ddev = bo->base.dev;
  
-	ret = ttm_bo_vm_reserve(bo, vmf);

-   if (ret)
-   return ret;
+   if (drm_dev_enter(ddev, &idx)) {
+   ret = ttm_bo_vm_reserve(bo, vmf);
+   if (ret)
+   goto exit;
+
+   prot = vma->vm_page_prot;
  
-	prot = vma->vm_page_prot;

-   ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
-   if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+   ret = ttm_bo_vm_fault_reserved(vmf, prot, 
TTM_BO_VM_NUM_PREFAULT);
+   if (ret == VM_FAULT_RETRY && !(vmf->flags & 
FAULT_FLAG_RETRY_NOWAIT))
+   goto exit;
+
+   dma_resv_unlock(bo->base.resv);
+
+exit:
+   drm_dev_exit(idx);
return ret;
+   } else {
  
-	dma_resv_unlock(bo->base.resv);

+   struct drm_file *file = NULL;
+   struct page *dummy_page = NULL;
+   int handle;
  
-	return ret;

+   /* We are faulting on imported BO from dma_buf */
+   if (bo->base.dma_buf && bo->base.import_attach) {
+   dummy_page = bo->base.dummy_page;
+   /* We are faulting on non imported BO, find drm_file owning the 
BO*/

Uh, we can't fish that out of the vma->vm_file pointer somehow? Or is that
one all wrong? Doing this kind of list walk looks pretty horrible.

If the vma doesn't have the right pointer I guess next option is that we
store the drm_file page in gem_bo->dummy_page, and replace it on first
export. But that's going to be tricky to track ...



For this one I hope to make all of this obsolete if Christian's suggestion from
patch 1/8 about mapping a global RO dummy page for reads and COW on writes
turns out to be possible to implement (testing

that memory usage indeed does not explode)

Andrey





+   } else {
+   struct drm_gem_object *gobj;
+
+   mutex_lock(&ddev->filelist_mutex);
+   list_for_each_entry(file, &ddev->filelist, lhead) {
+   spin_lock(&file->table_lock);
+   idr_for_each_entry(&file->object_idr, gobj, 
handle) {
+   if (gobj == &bo->base) {
+   dummy_page = file->dummy_page;
+   break;
+   }
+   }
+   spin_unlock(&file->table_lock);
+   }
+   mutex_unlock(&ddev->filelist_mutex);
+   }
+
+   if (dummy_page) {
+   /*
+* Let do_fault complete the PTE install e.t.c using 
vmf->page
+*
+* TODO - should i call free_page somewhere ?

Nah, instead don't call get_page. The page will be around as long as
there's a reference for the drm_file or gem_bo, which is longer than any
mmap. Otherwise yes this would leak really badly.


+*/
+   get_page(dummy_page);
+   vmf->page = dummy_page;
+   return 0;
+   } else {
+   return VM_FAULT_SIGSEGV;

Hm that would be a kernel bug, wouldn't it? WARN_ON() required here imo.
-Daniel


+   }
+   }
  }
  EXPORT_SYMBOL(ttm_bo_vm_fault);
  
--

2.7.4


___
amd-gfx mailing list

Re: [PATCH v2 2/8] drm/ttm: Remap all page faults to per process dummy page.

2020-06-24 Thread Daniel Vetter
On Tue, Jun 23, 2020 at 11:31:45PM -0400, Andrey Grodzovsky wrote:
> 
> On 6/22/20 5:41 AM, Daniel Vetter wrote:
> > On Sun, Jun 21, 2020 at 02:03:02AM -0400, Andrey Grodzovsky wrote:
> > > On device removal reroute all CPU mappings to dummy page per drm_file
> > > instance or imported GEM object.
> > > 
> > > Signed-off-by: Andrey Grodzovsky 
> > > ---
> > >   drivers/gpu/drm/ttm/ttm_bo_vm.c | 65 
> > > -
> > >   1 file changed, 57 insertions(+), 8 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c 
> > > b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> > > index 389128b..2f8bf5e 100644
> > > --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> > > +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> > > @@ -35,6 +35,8 @@
> > >   #include 
> > >   #include 
> > >   #include 
> > > +#include 
> > > +#include 
> > >   #include 
> > >   #include 
> > >   #include 
> > > @@ -328,19 +330,66 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
> > Hm I think diff and code flow look a bit bad now. What about renaming the
> > current function to __ttm_bo_vm_fault and then having something like the
> > below:
> > 
> > ttm_bo_vm_fault(args) {
> > 
> > if (drm_dev_enter()) {
> > __ttm_bo_vm_fault(args);
> > drm_dev_exit();
> > } else  {
> > drm_gem_insert_dummy_pfn();
> > }
> > }
> > 
> > I think drm_gem_insert_dummy_pfn(); should be portable across drivers, so
> > another nice point to try to unify drivers as much as possible.
> > -Daniel
> > 
> > >   pgprot_t prot;
> > >   struct ttm_buffer_object *bo = vma->vm_private_data;
> > >   vm_fault_t ret;
> > > + int idx;
> > > + struct drm_device *ddev = bo->base.dev;
> > > - ret = ttm_bo_vm_reserve(bo, vmf);
> > > - if (ret)
> > > - return ret;
> > > + if (drm_dev_enter(ddev, &idx)) {
> > > + ret = ttm_bo_vm_reserve(bo, vmf);
> > > + if (ret)
> > > + goto exit;
> > > +
> > > + prot = vma->vm_page_prot;
> > > - prot = vma->vm_page_prot;
> > > - ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
> > > - if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
> > > + ret = ttm_bo_vm_fault_reserved(vmf, prot, 
> > > TTM_BO_VM_NUM_PREFAULT);
> > > + if (ret == VM_FAULT_RETRY && !(vmf->flags & 
> > > FAULT_FLAG_RETRY_NOWAIT))
> > > + goto exit;
> > > +
> > > + dma_resv_unlock(bo->base.resv);
> > > +
> > > +exit:
> > > + drm_dev_exit(idx);
> > >   return ret;
> > > + } else {
> > > - dma_resv_unlock(bo->base.resv);
> > > + struct drm_file *file = NULL;
> > > + struct page *dummy_page = NULL;
> > > + int handle;
> > > - return ret;
> > > + /* We are faulting on imported BO from dma_buf */
> > > + if (bo->base.dma_buf && bo->base.import_attach) {
> > > + dummy_page = bo->base.dummy_page;
> > > + /* We are faulting on non imported BO, find drm_file owning the 
> > > BO*/
> > Uh, we can't fish that out of the vma->vm_file pointer somehow? Or is that
> > one all wrong? Doing this kind of list walk looks pretty horrible.
> > 
> > If the vma doesn't have the right pointer I guess next option is that we
> > store the drm_file page in gem_bo->dummy_page, and replace it on first
> > export. But that's going to be tricky to track ...
> > 
> > > + } else {
> > > + struct drm_gem_object *gobj;
> > > +
> > > + mutex_lock(&ddev->filelist_mutex);
> > > + list_for_each_entry(file, &ddev->filelist, lhead) {
> > > + spin_lock(&file->table_lock);
> > > + idr_for_each_entry(&file->object_idr, gobj, 
> > > handle) {
> > > + if (gobj == &bo->base) {
> > > + dummy_page = file->dummy_page;
> > > + break;
> > > + }
> > > + }
> > > + spin_unlock(&file->table_lock);
> > > + }
> > > + mutex_unlock(&ddev->filelist_mutex);
> > > + }
> > > +
> > > + if (dummy_page) {
> > > + /*
> > > +  * Let do_fault complete the PTE install e.t.c using 
> > > vmf->page
> > > +  *
> > > +  * TODO - should i call free_page somewhere ?
> > Nah, instead don't call get_page. The page will be around as long as
> > there's a reference for the drm_file or gem_bo, which is longer than any
> > mmap. Otherwise yes this would leak really badly.
> 
> 
> So actually that was my thinking in the first place and I indeed avoided
> taking a reference and this ended up
> with multiple BUG_ONs as seen below where refcount:-63 mapcount:-48 for a
> page are deep into negative
> values... Those warnings were gone once I added get_page(dummy) which in 

Re: [PATCH v2 2/8] drm/ttm: Remap all page faults to per process dummy page.

2020-06-23 Thread Andrey Grodzovsky


On 6/22/20 5:41 AM, Daniel Vetter wrote:

On Sun, Jun 21, 2020 at 02:03:02AM -0400, Andrey Grodzovsky wrote:

On device removal reroute all CPU mappings to dummy page per drm_file
instance or imported GEM object.

Signed-off-by: Andrey Grodzovsky 
---
  drivers/gpu/drm/ttm/ttm_bo_vm.c | 65 -
  1 file changed, 57 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 389128b..2f8bf5e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -35,6 +35,8 @@
  #include 
  #include 
  #include 
+#include 
+#include 
  #include 
  #include 
  #include 
@@ -328,19 +330,66 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)

Hm I think diff and code flow look a bit bad now. What about renaming the
current function to __ttm_bo_vm_fault and then having something like the
below:

ttm_bo_vm_fault(args) {

if (drm_dev_enter()) {
__ttm_bo_vm_fault(args);
drm_dev_exit();
} else  {
drm_gem_insert_dummy_pfn();
}
}

I think drm_gem_insert_dummy_pfn(); should be portable across drivers, so
another nice point to try to unify drivers as much as possible.
-Daniel


pgprot_t prot;
struct ttm_buffer_object *bo = vma->vm_private_data;
vm_fault_t ret;
+   int idx;
+   struct drm_device *ddev = bo->base.dev;
  
-	ret = ttm_bo_vm_reserve(bo, vmf);

-   if (ret)
-   return ret;
+   if (drm_dev_enter(ddev, &idx)) {
+   ret = ttm_bo_vm_reserve(bo, vmf);
+   if (ret)
+   goto exit;
+
+   prot = vma->vm_page_prot;
  
-	prot = vma->vm_page_prot;

-   ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
-   if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+   ret = ttm_bo_vm_fault_reserved(vmf, prot, 
TTM_BO_VM_NUM_PREFAULT);
+   if (ret == VM_FAULT_RETRY && !(vmf->flags & 
FAULT_FLAG_RETRY_NOWAIT))
+   goto exit;
+
+   dma_resv_unlock(bo->base.resv);
+
+exit:
+   drm_dev_exit(idx);
return ret;
+   } else {
  
-	dma_resv_unlock(bo->base.resv);

+   struct drm_file *file = NULL;
+   struct page *dummy_page = NULL;
+   int handle;
  
-	return ret;

+   /* We are faulting on imported BO from dma_buf */
+   if (bo->base.dma_buf && bo->base.import_attach) {
+   dummy_page = bo->base.dummy_page;
+   /* We are faulting on non imported BO, find drm_file owning the 
BO*/

Uh, we can't fish that out of the vma->vm_file pointer somehow? Or is that
one all wrong? Doing this kind of list walk looks pretty horrible.

If the vma doesn't have the right pointer I guess next option is that we
store the drm_file page in gem_bo->dummy_page, and replace it on first
export. But that's going to be tricky to track ...


+   } else {
+   struct drm_gem_object *gobj;
+
+   mutex_lock(&ddev->filelist_mutex);
+   list_for_each_entry(file, &ddev->filelist, lhead) {
+   spin_lock(&file->table_lock);
+   idr_for_each_entry(&file->object_idr, gobj, 
handle) {
+   if (gobj == &bo->base) {
+   dummy_page = file->dummy_page;
+   break;
+   }
+   }
+   spin_unlock(&file->table_lock);
+   }
+   mutex_unlock(&ddev->filelist_mutex);
+   }
+
+   if (dummy_page) {
+   /*
+* Let do_fault complete the PTE install e.t.c using 
vmf->page
+*
+* TODO - should i call free_page somewhere ?

Nah, instead don't call get_page. The page will be around as long as
there's a reference for the drm_file or gem_bo, which is longer than any
mmap. Otherwise yes this would leak really badly.



So actually that was my thinking in the first place and I indeed avoided taking
a reference and this ended up
with multiple BUG_ONs as seen below where refcount:-63 mapcount:-48 for a page
are deep into negative
values... Those warnings were gone once I added get_page(dummy), which in my
opinion implies that there
is a page reference per PTE, and that when the process address space is
unmapped
and PTEs are deleted there is also a put_page somewhere in mm core, and the
get_page per mapping

keeps it balanced.

Jun 20 01:36:43 ubuntu-1604-test kernel: [   98.762929] BUG: Bad page map in 
process glxgear:disk$0  pte:800132284867 pmd:15aaec067
Jun 20 01:36:43 ubuntu-1604-test kerne

Re: [PATCH v2 2/8] drm/ttm: Remap all page faults to per process dummy page.

2020-06-22 Thread Christian König

Am 21.06.20 um 08:03 schrieb Andrey Grodzovsky:

On device removal reroute all CPU mappings to dummy page per drm_file
instance or imported GEM object.

Signed-off-by: Andrey Grodzovsky 
---
  drivers/gpu/drm/ttm/ttm_bo_vm.c | 65 -
  1 file changed, 57 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 389128b..2f8bf5e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -35,6 +35,8 @@
  #include 
  #include 
  #include 
+#include 
+#include 
  #include 
  #include 
  #include 
@@ -328,19 +330,66 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
pgprot_t prot;
struct ttm_buffer_object *bo = vma->vm_private_data;
vm_fault_t ret;
+   int idx;
+   struct drm_device *ddev = bo->base.dev;
  
-	ret = ttm_bo_vm_reserve(bo, vmf);

-   if (ret)
-   return ret;
+   if (drm_dev_enter(ddev, &idx)) {


Better do this like if (!drm_dev_enter(...)) return ttm_bo_vm_dummy(..);

This way you can move all the dummy fault handling into a separate 
function without cluttering this one here to much.


Christian.


+   ret = ttm_bo_vm_reserve(bo, vmf);
+   if (ret)
+   goto exit;
+
+   prot = vma->vm_page_prot;
  
-	prot = vma->vm_page_prot;

-   ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
-   if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+   ret = ttm_bo_vm_fault_reserved(vmf, prot, 
TTM_BO_VM_NUM_PREFAULT);
+   if (ret == VM_FAULT_RETRY && !(vmf->flags & 
FAULT_FLAG_RETRY_NOWAIT))
+   goto exit;
+
+   dma_resv_unlock(bo->base.resv);
+
+exit:
+   drm_dev_exit(idx);
return ret;
+   } else {
  
-	dma_resv_unlock(bo->base.resv);

+   struct drm_file *file = NULL;
+   struct page *dummy_page = NULL;
+   int handle;
  
-	return ret;

+   /* We are faulting on imported BO from dma_buf */
+   if (bo->base.dma_buf && bo->base.import_attach) {
+   dummy_page = bo->base.dummy_page;
+   /* We are faulting on non imported BO, find drm_file owning the 
BO*/
+   } else {
+   struct drm_gem_object *gobj;
+
+   mutex_lock(&ddev->filelist_mutex);
+   list_for_each_entry(file, &ddev->filelist, lhead) {
+   spin_lock(&file->table_lock);
+   idr_for_each_entry(&file->object_idr, gobj, 
handle) {
+   if (gobj == &bo->base) {
+   dummy_page = file->dummy_page;
+   break;
+   }
+   }
+   spin_unlock(&file->table_lock);
+   }
+   mutex_unlock(&ddev->filelist_mutex);
+   }
+
+   if (dummy_page) {
+   /*
+* Let do_fault complete the PTE install e.t.c using 
vmf->page
+*
+* TODO - should i call free_page somewhere ?
+*/
+   get_page(dummy_page);
+   vmf->page = dummy_page;
+   return 0;
+   } else {
+   return VM_FAULT_SIGSEGV;
+   }
+   }
  }
  EXPORT_SYMBOL(ttm_bo_vm_fault);
  


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 2/8] drm/ttm: Remap all page faults to per process dummy page.

2020-06-22 Thread Daniel Vetter
On Sun, Jun 21, 2020 at 02:03:02AM -0400, Andrey Grodzovsky wrote:
> On device removal reroute all CPU mappings to dummy page per drm_file
> instance or imported GEM object.
> 
> Signed-off-by: Andrey Grodzovsky 
> ---
>  drivers/gpu/drm/ttm/ttm_bo_vm.c | 65 
> -
>  1 file changed, 57 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> index 389128b..2f8bf5e 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> @@ -35,6 +35,8 @@
>  #include 
>  #include 
>  #include 
> +#include 
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -328,19 +330,66 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)

Hm I think diff and code flow look a bit bad now. What about renaming the
current function to __ttm_bo_vm_fault and then having something like the
below:

ttm_bo_vm_fault(args) {

if (drm_dev_enter()) {
__ttm_bo_vm_fault(args);
drm_dev_exit();
} else  {
drm_gem_insert_dummy_pfn();
}
}

I think drm_gem_insert_dummy_pfn(); should be portable across drivers, so
another nice point to try to unify drivers as much as possible.
-Daniel

>   pgprot_t prot;
>   struct ttm_buffer_object *bo = vma->vm_private_data;
>   vm_fault_t ret;
> + int idx;
> + struct drm_device *ddev = bo->base.dev;
>  
> - ret = ttm_bo_vm_reserve(bo, vmf);
> - if (ret)
> - return ret;
> + if (drm_dev_enter(ddev, &idx)) {
> + ret = ttm_bo_vm_reserve(bo, vmf);
> + if (ret)
> + goto exit;
> +
> + prot = vma->vm_page_prot;
>  
> - prot = vma->vm_page_prot;
> - ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
> - if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
> + ret = ttm_bo_vm_fault_reserved(vmf, prot, 
> TTM_BO_VM_NUM_PREFAULT);
> + if (ret == VM_FAULT_RETRY && !(vmf->flags & 
> FAULT_FLAG_RETRY_NOWAIT))
> + goto exit;
> +
> + dma_resv_unlock(bo->base.resv);
> +
> +exit:
> + drm_dev_exit(idx);
>   return ret;
> + } else {
>  
> - dma_resv_unlock(bo->base.resv);
> + struct drm_file *file = NULL;
> + struct page *dummy_page = NULL;
> + int handle;
>  
> - return ret;
> + /* We are faulting on imported BO from dma_buf */
> + if (bo->base.dma_buf && bo->base.import_attach) {
> + dummy_page = bo->base.dummy_page;
> + /* We are faulting on non imported BO, find drm_file owning the 
> BO*/

Uh, we can't fish that out of the vma->vm_file pointer somehow? Or is that
one all wrong? Doing this kind of list walk looks pretty horrible.

If the vma doesn't have the right pointer I guess next option is that we
store the drm_file page in gem_bo->dummy_page, and replace it on first
export. But that's going to be tricky to track ...

> + } else {
> + struct drm_gem_object *gobj;
> +
> + mutex_lock(&ddev->filelist_mutex);
> + list_for_each_entry(file, &ddev->filelist, lhead) {
> + spin_lock(&file->table_lock);
> + idr_for_each_entry(&file->object_idr, gobj, 
> handle) {
> + if (gobj == &bo->base) {
> + dummy_page = file->dummy_page;
> + break;
> + }
> + }
> + spin_unlock(&file->table_lock);
> + }
> + mutex_unlock(&ddev->filelist_mutex);
> + }
> +
> + if (dummy_page) {
> + /*
> +  * Let do_fault complete the PTE install e.t.c using 
> vmf->page
> +  *
> +  * TODO - should i call free_page somewhere ?

Nah, instead don't call get_page. The page will be around as long as
there's a reference for the drm_file or gem_bo, which is longer than any
mmap. Otherwise yes this would leak really badly.

> +  */
> + get_page(dummy_page);
> + vmf->page = dummy_page;
> + return 0;
> + } else {
> + return VM_FAULT_SIGSEGV;

Hm that would be a kernel bug, wouldn't it? WARN_ON() required here imo.
-Daniel

> + }
> + }
>  }
>  EXPORT_SYMBOL(ttm_bo_vm_fault);
>  
> -- 
> 2.7.4
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx