There is no need to refcount vmappings of imported dma-bufs because
dma-buf core does this refcounting itself. Drop the vmapping refcounting
for imported buffers and defer to dma_buf_vmap()/dma_buf_vunmap(). This
will ease replacing all of the drm-shmem locks with a single dma-buf
reservation lock, preparing the drm-shmem code for the addition of the
generic drm-shmem shrinker.

Signed-off-by: Dmitry Osipenko <dmitry.osipe...@collabora.com>
---
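For context: dma-buf core already refcounts vmappings on its own. A
condensed sketch of the core of dma_buf_vmap() from
drivers/dma-buf/dma-buf.c (paraphrased here, error paths trimmed):

        mutex_lock(&dmabuf->lock);
        if (dmabuf->vmapping_counter) {
                /* already vmapped: bump count, reuse cached pointer */
                dmabuf->vmapping_counter++;
                *map = dmabuf->vmap_ptr;
                goto out_unlock;
        }
        ret = dmabuf->ops->vmap(dmabuf, &ptr);  /* first user maps */
        if (!ret) {
                dmabuf->vmap_ptr = ptr;
                dmabuf->vmapping_counter = 1;
                *map = dmabuf->vmap_ptr;
        }
out_unlock:
        mutex_unlock(&dmabuf->lock);

dma_buf_vunmap() mirrors this: it decrements vmapping_counter and calls
ops->vunmap only once the count drops to zero. Keeping a second
vmap_use_count in drm-shmem for imported buffers therefore only
duplicates this bookkeeping.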
 drivers/gpu/drm/drm_gem_shmem_helper.c | 35 +++++++++++++++-----------
 1 file changed, 20 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 7232e321fdb4..fd2647690bf7 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -293,24 +293,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
        struct drm_gem_object *obj = &shmem->base;
        int ret = 0;
 
-       if (shmem->vmap_use_count++ > 0) {
-               iosys_map_set_vaddr(map, shmem->vaddr);
-               return 0;
-       }
-
        if (obj->import_attach) {
                ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
                if (!ret) {
                        if (WARN_ON(map->is_iomem)) {
                                dma_buf_vunmap(obj->import_attach->dmabuf, map);
-                               ret = -EIO;
-                               goto err_put_pages;
+                               return -EIO;
                        }
-                       shmem->vaddr = map->vaddr;
                }
        } else {
                pgprot_t prot = PAGE_KERNEL;
 
+               if (shmem->vmap_use_count++ > 0) {
+                       iosys_map_set_vaddr(map, shmem->vaddr);
+                       return 0;
+               }
+
                ret = drm_gem_shmem_get_pages(shmem);
                if (ret)
                        goto err_zero_use;
@@ -376,15 +374,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 {
        struct drm_gem_object *obj = &shmem->base;
 
-       if (WARN_ON_ONCE(!shmem->vmap_use_count))
-               return;
-
-       if (--shmem->vmap_use_count > 0)
-               return;
-
        if (obj->import_attach) {
                dma_buf_vunmap(obj->import_attach->dmabuf, map);
        } else {
+               if (WARN_ON_ONCE(!shmem->vmap_use_count))
+                       return;
+
+               if (--shmem->vmap_use_count > 0)
+                       return;
+
                vunmap(shmem->vaddr);
                drm_gem_shmem_put_pages(shmem);
        }
@@ -637,7 +635,14 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
                              struct drm_printer *p, unsigned int indent)
 {
        drm_printf_indent(p, indent, "pages_use_count=%u\n", 
shmem->pages_use_count);
-       drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+
+       if (shmem->base.import_attach)
+               drm_printf_indent(p, indent, "vmap_use_count=%u\n",
+                                 shmem->base.dma_buf->vmapping_counter);
+       else
+               drm_printf_indent(p, indent, "vmap_use_count=%u\n",
+                                 shmem->vmap_use_count);
+
        drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
 }
 EXPORT_SYMBOL(drm_gem_shmem_print_info);
-- 
2.35.3