In preparation for making eb_vma bigger and heavier to run in
parallel, we need to stop applying an in-place swap() to reorder the
array around ww_mutex deadlocks. Keep the array intact and reorder the
locks using a dedicated list instead.
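
For reference, the new list_for_each_entry_safe_continue_reverse()
iterator walks backwards from (and excluding) the given pos towards
the list head, keeping a look-behind cursor so the entry being
visited may be unlinked or moved mid-walk. A minimal usage sketch
(struct foo, cur and head are illustrative names, not part of this
patch):

	/* struct foo { ...; struct list_head link; }; */
	struct foo *pos = cur, *n;

	/*
	 * Visit every entry before cur, most recent first; moving
	 * pos to the tail is safe since n already holds its
	 * predecessor.
	 */
	list_for_each_entry_safe_continue_reverse(pos, n, &head, link)
		list_move_tail(&pos->link, &head);

On -EDEADLK this lets us unlock and rotate every previously acquired
lock to the tail of eb->lock, leaving the contended element at the
head; after taking its lock on the slow path, the forward walk simply
continues and reacquires the rotated elements behind it.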

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 48 +++++++++++--------
 drivers/gpu/drm/i915/i915_utils.h             |  6 +++
 2 files changed, 34 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 67ba33b3de60..0d773767c2ac 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -36,6 +36,7 @@ struct eb_vma {
        struct drm_i915_gem_exec_object2 *exec;
        struct list_head bind_link;
        struct list_head reloc_link;
+       struct list_head lock_link;
 
        struct hlist_node node;
        u32 handle;
@@ -254,6 +255,8 @@ struct i915_execbuffer {
        /** list of vma that have execobj.relocation_count */
        struct list_head relocs;
 
+       struct list_head lock;
+
        /**
         * Track the most recently used object for relocations, as we
         * frequently have to perform multiple relocations within the same
@@ -399,6 +402,10 @@ static int eb_create(struct i915_execbuffer *eb)
                eb->lut_size = -eb->buffer_count;
        }
 
+       INIT_LIST_HEAD(&eb->relocs);
+       INIT_LIST_HEAD(&eb->unbound);
+       INIT_LIST_HEAD(&eb->lock);
+
        return 0;
 }
 
@@ -604,6 +611,8 @@ eb_add_vma(struct i915_execbuffer *eb,
                eb_unreserve_vma(ev);
                list_add_tail(&ev->bind_link, &eb->unbound);
        }
+
+       list_add_tail(&ev->lock_link, &eb->lock);
 }
 
 static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -880,9 +889,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
        unsigned int i;
        int err = 0;
 
-       INIT_LIST_HEAD(&eb->relocs);
-       INIT_LIST_HEAD(&eb->unbound);
-
        for (i = 0; i < eb->buffer_count; i++) {
                struct i915_vma *vma;
 
@@ -1790,38 +1796,39 @@ static int eb_relocate(struct i915_execbuffer *eb)
 
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
 {
-       const unsigned int count = eb->buffer_count;
        struct ww_acquire_ctx acquire;
-       unsigned int i;
+       struct eb_vma *ev;
        int err = 0;
 
        ww_acquire_init(&acquire, &reservation_ww_class);
 
-       for (i = 0; i < count; i++) {
-               struct eb_vma *ev = &eb->vma[i];
+       list_for_each_entry(ev, &eb->lock, lock_link) {
                struct i915_vma *vma = ev->vma;
 
                err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
                if (err == -EDEADLK) {
-                       GEM_BUG_ON(i == 0);
-                       do {
-                               int j = i - 1;
+                       struct eb_vma *unlock = ev, *en;
 
-                               ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
-
-                               swap(eb->vma[i],  eb->vma[j]);
-                       } while (--i);
+                       list_for_each_entry_safe_continue_reverse(unlock, en, &eb->lock, lock_link) {
+                               ww_mutex_unlock(&unlock->vma->resv->lock);
+                               list_move_tail(&unlock->lock_link, &eb->lock);
+                       }
 
+                       GEM_BUG_ON(!list_is_first(&ev->lock_link, &eb->lock));
                        err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
                                                               &acquire);
                }
-               if (err)
-                       break;
+               if (err) {
+                       list_for_each_entry_continue_reverse(ev, &eb->lock, lock_link)
+                               ww_mutex_unlock(&ev->vma->resv->lock);
+
+                       ww_acquire_fini(&acquire);
+                       goto err_skip;
+               }
        }
        ww_acquire_done(&acquire);
 
-       while (i--) {
-               struct eb_vma *ev = &eb->vma[i];
+       list_for_each_entry(ev, &eb->lock, lock_link) {
                struct i915_vma *vma = ev->vma;
                unsigned int flags = ev->flags;
                struct drm_i915_gem_object *obj = vma->obj;
@@ -2122,9 +2129,10 @@ static int eb_parse(struct i915_execbuffer *eb)
        if (err)
                goto err_trampoline;
 
-       eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
-       eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
        eb->batch = &eb->vma[eb->buffer_count++];
+       eb->batch->vma = i915_vma_get(shadow);
+       eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
+       list_add_tail(&eb->batch->lock_link, &eb->lock);
        eb->vma[eb->buffer_count].vma = NULL;
 
        eb->trampoline = trampoline;
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 03a73d2bd50d..28813806bc19 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -266,6 +266,12 @@ static inline int list_is_last_rcu(const struct list_head *list,
        return READ_ONCE(list->next) == head;
 }
 
+#define list_for_each_entry_safe_continue_reverse(pos, n, head, member) \
+       for (pos = list_prev_entry(pos, member),                        \
+               n = list_prev_entry(pos, member);                       \
+            &pos->member != (head);                                    \
+            pos = n, n = list_prev_entry(n, member))
+
 /*
  * Wait until the work is finally complete, even if it tries to postpone
  * by requeueing itself. Note, that if the worker never cancels itself,
-- 
2.20.1
