Since we may add new waiters to a request while it is being run, we
need to mark the list iteration as safe against concurrent addition.

v2: Mika spotted that we used the same trick for signalers_list, so warn
the compiler about the lockless walk there as well.
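
As an aside, the publish/consume pairing this relies on looks like the
following in miniature. This is illustrative only, not part of the
patch; the "waiter" type and helper names here are made up. The writer
publishes with list_add_rcu(), whose rcu_assign_pointer() provides
release semantics, while the reader walks with
list_for_each_entry_lockless(), whose READ_ONCE() is precisely the
"warning" that stops the compiler from caching or re-reading ->next
mid-walk:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

/* Illustrative sketch only; not part of this patch. */
struct waiter {
	struct list_head link;
	bool ready;
};

static LIST_HEAD(waiters);
static DEFINE_SPINLOCK(waiters_lock);

/* Writer: publish under the lock; list_add_rcu() orders the
 * initialisation of *w before its insertion into the list. */
static void add_waiter(struct waiter *w)
{
	spin_lock(&waiters_lock);
	list_add_rcu(&w->link, &waiters);
	spin_unlock(&waiters_lock);
}

/* Reader: walk without the lock; the READ_ONCE() inside the
 * iterator keeps the compiler from re-reading ->next and tripping
 * over a concurrent list_add_rcu(). Safe here because entries are
 * only ever added, never removed, while we walk. */
static bool any_waiter_ready(void)
{
	struct waiter *w;

	list_for_each_entry_lockless(w, &waiters, link) {
		if (w->ready)
			return true;
	}
	return false;
}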

Fixes: 32ff621fd744 ("drm/i915/gt: Allow temporary suspension of inflight requests")
Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
Cc: Mika Kuoppala <mika.kuopp...@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuopp...@linux.intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c   | 11 ++++++++---
 drivers/gpu/drm/i915/i915_scheduler.c |  2 +-
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b350e01d86d2..ed1e4d883d47 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1620,6 +1620,11 @@ last_active(const struct intel_engine_execlists *execlists)
                                     &(rq__)->sched.waiters_list, \
                                     wait_link)
 
+#define for_each_signaler(p__, rq__) \
+       list_for_each_entry_lockless(p__, \
+                                    &(rq__)->sched.signalers_list, \
+                                    signal_link)
+
 static void defer_request(struct i915_request *rq, struct list_head * const pl)
 {
        LIST_HEAD(list);
@@ -2378,7 +2383,7 @@ static void __execlists_hold(struct i915_request *rq)
                list_move_tail(&rq->sched.link, &rq->engine->active.hold);
                i915_request_set_hold(rq);
 
-               list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+               for_each_waiter(p, rq) {
                        struct i915_request *w =
                                container_of(p->waiter, typeof(*w), sched);
 
@@ -2464,7 +2469,7 @@ static bool hold_request(const struct i915_request *rq)
         * If one of our ancestors is on hold, we must also be on hold,
         * otherwise we will bypass it and execute before it.
         */
-       list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
+       for_each_signaler(p, rq) {
                const struct i915_request *s =
                        container_of(p->signaler, typeof(*s), sched);
 
@@ -2496,7 +2501,7 @@ static void __execlists_unhold(struct i915_request *rq)
                RQ_TRACE(rq, "hold release\n");
 
                /* Also release any children on this engine that are ready */
-               list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+               for_each_waiter(p, rq) {
                        struct i915_request *w =
                                container_of(p->waiter, typeof(*w), sched);
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 34b654b4e58a..e19a37a83397 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -433,7 +433,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                        node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
 
                /* All set, now publish. Beware the lockless walkers. */
-               list_add(&dep->signal_link, &node->signalers_list);
+               list_add_rcu(&dep->signal_link, &node->signalers_list);
                list_add_rcu(&dep->wait_link, &signal->waiters_list);
 
                /*
-- 
2.25.0
