First step towards a new shared fence container implementation.
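
With this change dma_resv_add_excl_fence() no longer drops the shared fences
when a new exclusive fence is installed, so consumers have to look at both the
exclusive fence and the shared fences. A minimal sketch of the resulting
pattern, mirroring what the msm_gem_sync_object() hunk below does (illustration
only, the helper name here is made up; caller must hold the reservation lock):

  static int example_sync_resv(struct dma_resv *resv, u64 context)
  {
          struct dma_resv_list *fobj = dma_resv_get_list(resv);
          struct dma_fence *fence = dma_resv_get_excl(resv);
          unsigned int i;
          int ret;

          /* The exclusive fence must be checked even when shared
           * fences are present now.
           */
          if (fence && fence->context != context) {
                  ret = dma_fence_wait(fence, true);
                  if (ret)
                          return ret;
          }

          /* ... and the shared fences are no longer replaced by it. */
          for (i = 0; fobj && i < fobj->shared_count; ++i) {
                  fence = rcu_dereference_protected(fobj->shared[i],
                                                    dma_resv_held(resv));
                  if (fence->context == context)
                          continue;
                  ret = dma_fence_wait(fence, true);
                  if (ret)
                          return ret;
          }

          return 0;
  }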

Signed-off-by: Christian König <christian.koe...@amd.com>
---
 drivers/dma-buf/dma-resv.c              | 16 +---------------
 drivers/gpu/drm/msm/msm_gem.c           | 14 ++++++--------
 drivers/gpu/drm/nouveau/nouveau_fence.c |  2 +-
 3 files changed, 8 insertions(+), 24 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 24adc32d36d4..d3a9a3bb15f0 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -271,32 +271,18 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
        struct dma_fence *old_fence = dma_resv_get_excl(obj);
-       struct dma_resv_list *old;
-       u32 i = 0;
 
        dma_resv_assert_held(obj);
 
-       old = dma_resv_get_list(obj);
-       if (old)
-               i = old->shared_count;
-
-       if (fence)
-               dma_fence_get(fence);
+       dma_fence_get(fence);
 
        preempt_disable();
        write_seqcount_begin(&obj->seq);
        /* write_seqcount_begin provides the necessary memory barrier */
        RCU_INIT_POINTER(obj->fence_excl, fence);
-       if (old)
-               old->shared_count = 0;
        write_seqcount_end(&obj->seq);
        preempt_enable();
 
-       /* inplace update, no shared fences */
-       while (i--)
-               dma_fence_put(rcu_dereference_protected(old->shared[i],
-                                               dma_resv_held(obj)));
-
        dma_fence_put(old_fence);
 }
 EXPORT_SYMBOL(dma_resv_add_excl_fence);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 348a7ad2c044..90e3dc3b927a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -668,14 +668,12 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
        int i, ret;
 
        fobj = dma_resv_get_list(obj->resv);
-       if (!fobj || (fobj->shared_count == 0)) {
-               fence = dma_resv_get_excl(obj->resv);
-               /* don't need to wait on our own fences, since ring is fifo */
-               if (fence && (fence->context != fctx->context)) {
-                       ret = dma_fence_wait(fence, true);
-                       if (ret)
-                               return ret;
-               }
+       fence = dma_resv_get_excl(obj->resv);
+       /* don't need to wait on our own fences, since ring is fifo */
+       if (fence && (fence->context != fctx->context)) {
+               ret = dma_fence_wait(fence, true);
+               if (ret)
+                       return ret;
        }
 
        if (!exclusive || !fobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 8df390078c85..42ddddbb49e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -350,7 +350,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
        fobj = dma_resv_get_list(resv);
        fence = dma_resv_get_excl(resv);
 
-       if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
+       if (fence) {
                struct nouveau_channel *prev = NULL;
                bool must_wait = true;
 
-- 
2.17.1