Re: [Intel-gfx] [PATCH v15 16/23] drm/shmem-helper: Use kref for vmap_use_count

2023-09-02 Thread Dmitry Osipenko
On 8/28/23 13:00, Boris Brezillon wrote:
> On Sun, 27 Aug 2023 20:54:42 +0300
> Dmitry Osipenko  wrote:
> 
>> Use the kref helper for vmap_use_count to make refcounting consistent with
>> pages_use_count and pages_pin_count, which already use kref. This will allow
>> us to optimize unlocked vmappings by skipping reservation locking when
>> refcnt > 1.
> 
> The core is taking the resv lock before calling ->v[un]map(), so
> switching to a kref sounds a bit premature/useless, unless there are
> plans to delegate the locking to the drivers. The only thing it brings
> is standard overflow/underflow checks. Not really sure it's worth
> transitioning to a kref for this field until we have a real use case.

The overflow checks are worth the transition. I'll mention them in the commit
message for v16.
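
For reference, a minimal sketch of what those checks buy us (not part of the
patch; release_cb() and example() are made-up names, and the behaviour shown
is the usual refcount_t-backed kref semantics):

#include <linux/kref.h>

static void release_cb(struct kref *kref)
{
	/* would normally free the object that embeds @kref */
}

static void example(void)
{
	struct kref ref;

	kref_init(&ref);            /* count = 1 */
	kref_get(&ref);             /* count = 2 */
	kref_put(&ref, release_cb); /* count = 1 */
	kref_put(&ref, release_cb); /* count = 0, release_cb() runs */
	kref_put(&ref, release_cb); /* refcount_t WARNs about the underflow
	                             * instead of silently wrapping, unlike
	                             * a bare vmap_use_count-- */
}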

-- 
Best regards,
Dmitry



Re: [Intel-gfx] [PATCH v15 16/23] drm/shmem-helper: Use kref for vmap_use_count

2023-08-28 Thread Boris Brezillon
On Sun, 27 Aug 2023 20:54:42 +0300
Dmitry Osipenko  wrote:

> Use the kref helper for vmap_use_count to make refcounting consistent with
> pages_use_count and pages_pin_count, which already use kref. This will allow
> us to optimize unlocked vmappings by skipping reservation locking when
> refcnt > 1.

The core is taking the resv lock before calling ->v[un]map(), so
switching to a kref sounds a bit premature/useless, unless there are
plans to delegate the locking to the drivers. The only thing it brings
is standard overflow/underflow checks. Not really sure it's worth
transitioning to a kref for this field until we have a real use case.
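
For completeness, the optimization the commit message alludes to would
presumably look something like the sketch below (hypothetical, not part of
this series, and ignoring the import_attach case): take a reference without
the resv lock when a mapping already exists, and only fall back to the locked
slow path when the count is zero.

/* drm_gem_shmem_vmap_fast() is a made-up name for illustration only; it
 * would live in drm_gem_shmem_helper.c next to the locked variant. */
static int drm_gem_shmem_vmap_fast(struct drm_gem_shmem_object *shmem,
				   struct iosys_map *map)
{
	int ret;

	/* Fast path: a vmapping already exists, so take a reference
	 * without touching the reservation lock. Holding the reference
	 * keeps shmem->vaddr valid. */
	if (kref_get_unless_zero(&shmem->vmap_use_count)) {
		iosys_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	/* Slow path: take the resv lock and create the mapping. */
	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}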

> 
> Suggested-by: Boris Brezillon 
> Signed-off-by: Dmitry Osipenko 
> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 37 ++
>  include/drm/drm_gem_shmem_helper.h |  2 +-
>  2 files changed, 21 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 17a0177acb5d..d96fee3d6166 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -144,7 +144,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
>   } else if (!shmem->imported_sgt) {
>   dma_resv_lock(shmem->base.resv, NULL);
>  
> - drm_WARN_ON(obj->dev, shmem->vmap_use_count);
> + drm_WARN_ON(obj->dev, kref_read(&shmem->vmap_use_count));
>  
>   if (shmem->sgt) {
>   dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
> @@ -359,23 +359,25 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
>  
>   dma_resv_assert_held(shmem->base.resv);
>  
> - if (shmem->vmap_use_count++ > 0) {
> + if (kref_get_unless_zero(&shmem->vmap_use_count)) {
>   iosys_map_set_vaddr(map, shmem->vaddr);
>   return 0;
>   }
>  
>   ret = drm_gem_shmem_pin_locked(shmem);
>   if (ret)
> - goto err_zero_use;
> + return ret;
>  
>   if (shmem->map_wc)
>   prot = pgprot_writecombine(prot);
>   shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
>   VM_MAP, prot);
> - if (!shmem->vaddr)
> + if (!shmem->vaddr) {
>   ret = -ENOMEM;
> - else
> + } else {
>   iosys_map_set_vaddr(map, shmem->vaddr);
> + kref_init(&shmem->vmap_use_count);
> + }
>   }
>  
>   if (ret) {
> @@ -388,13 +390,22 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
>  err_put_pages:
>   if (!obj->import_attach)
>   drm_gem_shmem_unpin_locked(shmem);
> -err_zero_use:
> - shmem->vmap_use_count = 0;
>  
>   return ret;
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
>  
> +static void drm_gem_shmem_kref_vunmap(struct kref *kref)
> +{
> + struct drm_gem_shmem_object *shmem;
> +
> + shmem = container_of(kref, struct drm_gem_shmem_object,
> +  vmap_use_count);
> +
> + vunmap(shmem->vaddr);
> + drm_gem_shmem_unpin_locked(shmem);
> +}
> +
>  /*
>   * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
>   * @shmem: shmem GEM object
> @@ -416,15 +427,7 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
>   dma_buf_vunmap(obj->import_attach->dmabuf, map);
>   } else {
>   dma_resv_assert_held(shmem->base.resv);
> -
> - if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
> - return;
> -
> - if (--shmem->vmap_use_count > 0)
> - return;
> -
> - vunmap(shmem->vaddr);
> - drm_gem_shmem_unpin_locked(shmem);
> + kref_put(&shmem->vmap_use_count, drm_gem_shmem_kref_vunmap);
>   }
>  
>   shmem->vaddr = NULL;
> @@ -663,7 +666,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
>   return;
>  
>   drm_printf_indent(p, indent, "pages_use_count=%u\n", kref_read(&shmem->pages_use_count));
> - drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
> + drm_printf_indent(p, indent, "vmap_use_count=%u\n", kref_read(&shmem->vmap_use_count));
>   drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> index 400ecd63f45f..0e0ccd380f66 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -81,7 +81,7 @@ struct drm_gem_shmem_object {
>* Reference count on the virtual address.
>* The address are un-mapped when the count reaches zero.
>*/
> - unsigned int vmap_use_count;
> + struct kref vmap_use_count;

[Intel-gfx] [PATCH v15 16/23] drm/shmem-helper: Use kref for vmap_use_count

2023-08-27 Thread Dmitry Osipenko
Use the kref helper for vmap_use_count to make refcounting consistent with
pages_use_count and pages_pin_count, which already use kref. This will allow
us to optimize unlocked vmappings by skipping reservation locking when
refcnt > 1.

Suggested-by: Boris Brezillon 
Signed-off-by: Dmitry Osipenko 
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 37 ++
 include/drm/drm_gem_shmem_helper.h |  2 +-
 2 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 17a0177acb5d..d96fee3d6166 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -144,7 +144,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
} else if (!shmem->imported_sgt) {
dma_resv_lock(shmem->base.resv, NULL);
 
-   drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+   drm_WARN_ON(obj->dev, kref_read(&shmem->vmap_use_count));
 
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -359,23 +359,25 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 
dma_resv_assert_held(shmem->base.resv);
 
-   if (shmem->vmap_use_count++ > 0) {
+   if (kref_get_unless_zero(&shmem->vmap_use_count)) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
 
ret = drm_gem_shmem_pin_locked(shmem);
if (ret)
-   goto err_zero_use;
+   return ret;
 
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
-   if (!shmem->vaddr)
+   if (!shmem->vaddr) {
ret = -ENOMEM;
-   else
+   } else {
iosys_map_set_vaddr(map, shmem->vaddr);
+   kref_init(&shmem->vmap_use_count);
+   }
}
 
if (ret) {
@@ -388,13 +390,22 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 err_put_pages:
if (!obj->import_attach)
drm_gem_shmem_unpin_locked(shmem);
-err_zero_use:
-   shmem->vmap_use_count = 0;
 
return ret;
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
 
+static void drm_gem_shmem_kref_vunmap(struct kref *kref)
+{
+   struct drm_gem_shmem_object *shmem;
+
+   shmem = container_of(kref, struct drm_gem_shmem_object,
+vmap_use_count);
+
+   vunmap(shmem->vaddr);
+   drm_gem_shmem_unpin_locked(shmem);
+}
+
 /*
  * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
@@ -416,15 +427,7 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
dma_resv_assert_held(shmem->base.resv);
-
-   if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
-   return;
-
-   if (--shmem->vmap_use_count > 0)
-   return;
-
-   vunmap(shmem->vaddr);
-   drm_gem_shmem_unpin_locked(shmem);
+   kref_put(&shmem->vmap_use_count, drm_gem_shmem_kref_vunmap);
}
 
shmem->vaddr = NULL;
@@ -663,7 +666,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
return;
 
    drm_printf_indent(p, indent, "pages_use_count=%u\n", kref_read(&shmem->pages_use_count));
-   drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+   drm_printf_indent(p, indent, "vmap_use_count=%u\n", kref_read(&shmem->vmap_use_count));
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 400ecd63f45f..0e0ccd380f66 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -81,7 +81,7 @@ struct drm_gem_shmem_object {
 * Reference count on the virtual address.
 * The address are un-mapped when the count reaches zero.
 */
-   unsigned int vmap_use_count;
+   struct kref vmap_use_count;
 
/**
 * @got_sgt:
-- 
2.41.0