>-----Original Message-----
>From: Dmitry Osipenko <dmitry.osipe...@collabora.com>
>Sent: Wednesday, August 31, 2022 11:38 AM
>To: David Airlie <airl...@linux.ie>; Gerd Hoffmann <kra...@redhat.com>;
>Gurchetan Singh <gurchetansi...@chromium.org>; Chia-I Wu
><olva...@gmail.com>; Daniel Vetter <dan...@ffwll.ch>; Daniel Almeida
><daniel.alme...@collabora.com>; Gert Wollny <gert.wol...@collabora.com>;
>Gustavo Padovan <gustavo.pado...@collabora.com>; Daniel Stone
><dan...@fooishbar.org>; Tomeu Vizoso <tomeu.viz...@collabora.com>;
>Maarten Lankhorst <maarten.lankho...@linux.intel.com>; Maxime Ripard
><mrip...@kernel.org>; Thomas Zimmermann <tzimmerm...@suse.de>;
>Rob Clark <robdcl...@gmail.com>; Sumit Semwal
><sumit.sem...@linaro.org>; Christian König <christian.koe...@amd.com>;
>Pan, Xinhui <xinhui....@amd.com>; Thierry Reding
><thierry.red...@gmail.com>; Tomasz Figa <tf...@chromium.org>; Marek
>Szyprowski <m.szyprow...@samsung.com>; Mauro Carvalho Chehab
><mche...@kernel.org>; Alex Deucher <alexander.deuc...@amd.com>; Jani
>Nikula <jani.nik...@linux.intel.com>; Joonas Lahtinen
><joonas.lahti...@linux.intel.com>; Vivi, Rodrigo <rodrigo.v...@intel.com>;
>Tvrtko Ursulin <tvrtko.ursu...@linux.intel.com>; Thomas Hellström
><thomas...@shipmail.org>; Qiang Yu <yuq...@gmail.com>; Srinivas
>Kandagatla <srinivas.kandaga...@linaro.org>; Amol Maheshwari
><amah...@qti.qualcomm.com>; Jason Gunthorpe <j...@ziepe.ca>; Leon
>Romanovsky <l...@kernel.org>; Gross, Jurgen <jgr...@suse.com>; Stefano
>Stabellini <sstabell...@kernel.org>; Oleksandr Tyshchenko
><oleksandr_tyshche...@epam.com>; Tomi Valkeinen <to...@kernel.org>;
>Russell King <li...@armlinux.org.uk>; Lucas Stach <l.st...@pengutronix.de>;
>Christian Gmeiner <christian.gmei...@gmail.com>
>Cc: dri-devel@lists.freedesktop.org; linux-ker...@vger.kernel.org; Dmitry
>Osipenko <dig...@gmail.com>; linux-me...@vger.kernel.org; linaro-mm-
>s...@lists.linaro.org; amd-...@lists.freedesktop.org; intel-
>g...@lists.freedesktop.org; ker...@collabora.com; virtualization@lists.linux-
>foundation.org; linux-r...@vger.kernel.org; linux-arm-
>m...@vger.kernel.org
>Subject: [PATCH v4 06/21] drm/i915: Prepare to dynamic dma-buf locking
>specification
>
>Prepare the i915 driver for the common dynamic dma-buf locking convention
>by starting to use the unlocked versions of the dma-buf API functions and
>by handling the cases where the importer now holds the reservation lock.
>
>Signed-off-by: Dmitry Osipenko <dmitry.osipe...@collabora.com>
>---
> drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c       |  2 +-
> drivers/gpu/drm/i915/gem/i915_gem_object.c       | 12 ++++++++++++
> .../gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 16 ++++++++--------
> 3 files changed, 21 insertions(+), 9 deletions(-)
>
>diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
>index f5062d0c6333..07eee1c09aaf 100644
>--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
>+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
>@@ -72,7 +72,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
>       struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
>       void *vaddr;
>
>-      vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
>+      vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
>       if (IS_ERR(vaddr))
>               return PTR_ERR(vaddr);
>
>diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
>index 389e9f157ca5..7e2a9b02526c 100644
>--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
>+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
>@@ -331,7 +331,19 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
>                       continue;
>               }
>
>+              /*
>+               * dma_buf_unmap_attachment() requires reservation to be
>+               * locked. The imported GEM shouldn't share reservation lock,
>+               * so it's safe to take the lock.
>+               */
>+              if (obj->base.import_attach)
>+                      i915_gem_object_lock(obj, NULL);

There is a lot going on here.  Taking the lock this early may be premature...

>               __i915_gem_object_pages_fini(obj);

i915_gem_dmabuf.c:i915_gem_object_put_pages_dmabuf() is where
dma_buf_unmap_attachment() is actually called; would it make more sense
to do the locking there?
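
Something along these lines is what I had in mind (untested sketch only;
the put_pages hook signature is from memory, and it assumes nothing on the
free path already holds the object's reservation lock):

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					      struct sg_table *sgt)
{
	struct dma_buf_attachment *attach = obj->base.import_attach;

	/*
	 * Take the reservation lock right around the call that needs it,
	 * rather than wrapping __i915_gem_object_pages_fini() in the caller.
	 */
	i915_gem_object_lock(obj, NULL);
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	i915_gem_object_unlock(obj);
}

That would keep the locked region limited to the unmap itself instead of
covering everything __i915_gem_object_pages_fini() does.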

Mike


>+
>+              if (obj->base.import_attach)
>+                      i915_gem_object_unlock(obj);
>+
>               __i915_gem_free_object(obj);
>
>               /* But keep the pointer alive for RCU-protected lookups */
>diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
>index 62c61af77a42..9e3ed634aa0e 100644
>--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
>+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
>@@ -213,7 +213,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
>               goto out_import;
>       }
>
>-      st = dma_buf_map_attachment(import_attach, DMA_BIDIRECTIONAL);
>+      st = dma_buf_map_attachment_unlocked(import_attach, DMA_BIDIRECTIONAL);
>       if (IS_ERR(st)) {
>               err = PTR_ERR(st);
>               goto out_detach;
>@@ -226,7 +226,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
>               timeout = -ETIME;
>       }
>       err = timeout > 0 ? 0 : timeout;
>-      dma_buf_unmap_attachment(import_attach, st, DMA_BIDIRECTIONAL);
>+      dma_buf_unmap_attachment_unlocked(import_attach, st, DMA_BIDIRECTIONAL);
> out_detach:
>       dma_buf_detach(dmabuf, import_attach);
> out_import:
>@@ -296,7 +296,7 @@ static int igt_dmabuf_import(void *arg)
>               goto out_obj;
>       }
>
>-      err = dma_buf_vmap(dmabuf, &map);
>+      err = dma_buf_vmap_unlocked(dmabuf, &map);
>       dma_map = err ? NULL : map.vaddr;
>       if (!dma_map) {
>               pr_err("dma_buf_vmap failed\n");
>@@ -337,7 +337,7 @@ static int igt_dmabuf_import(void *arg)
>
>       err = 0;
> out_dma_map:
>-      dma_buf_vunmap(dmabuf, &map);
>+      dma_buf_vunmap_unlocked(dmabuf, &map);
> out_obj:
>       i915_gem_object_put(obj);
> out_dmabuf:
>@@ -358,7 +358,7 @@ static int igt_dmabuf_import_ownership(void *arg)
>       if (IS_ERR(dmabuf))
>               return PTR_ERR(dmabuf);
>
>-      err = dma_buf_vmap(dmabuf, &map);
>+      err = dma_buf_vmap_unlocked(dmabuf, &map);
>       ptr = err ? NULL : map.vaddr;
>       if (!ptr) {
>               pr_err("dma_buf_vmap failed\n");
>@@ -367,7 +367,7 @@ static int igt_dmabuf_import_ownership(void *arg)
>       }
>
>       memset(ptr, 0xc5, PAGE_SIZE);
>-      dma_buf_vunmap(dmabuf, &map);
>+      dma_buf_vunmap_unlocked(dmabuf, &map);
>
>       obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
>       if (IS_ERR(obj)) {
>@@ -418,7 +418,7 @@ static int igt_dmabuf_export_vmap(void *arg)
>       }
>       i915_gem_object_put(obj);
>
>-      err = dma_buf_vmap(dmabuf, &map);
>+      err = dma_buf_vmap_unlocked(dmabuf, &map);
>       ptr = err ? NULL : map.vaddr;
>       if (!ptr) {
>               pr_err("dma_buf_vmap failed\n");
>@@ -435,7 +435,7 @@ static int igt_dmabuf_export_vmap(void *arg)
>       memset(ptr, 0xc5, dmabuf->size);
>
>       err = 0;
>-      dma_buf_vunmap(dmabuf, &map);
>+      dma_buf_vunmap_unlocked(dmabuf, &map);
> out:
>       dma_buf_put(dmabuf);
>       return err;
>--
>2.37.2
