Make the ability to suspend and resume a request and its dependents
generic, by lifting the execlists hold/unhold machinery out of the
execlists submission backend and into the common scheduler as
__intel_engine_hold_request() and __intel_engine_unhold_request().
Holding a request moves it, together with every ready waiter on the
same engine, onto engine->active.hold; unholding returns them to the
priority queue for resubmission.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
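For reference, a minimal usage sketch (not part of this patch) of how a
submission backend is expected to drive the new helpers, mirroring what
execlists_hold()/execlists_unhold() do below; backend_hold() and
backend_unhold() are hypothetical names:

    static bool backend_hold(struct intel_engine_cs *engine,
                             struct i915_request *rq)
    {
            bool held;

            /* No-op if the request is already suspended */
            if (i915_request_on_hold(rq))
                    return false;

            /* The helper requires engine->active.lock and may unsubmit rq */
            spin_lock_irq(&engine->active.lock);
            held = __intel_engine_hold_request(engine, rq);
            spin_unlock_irq(&engine->active.lock);

            return held; /* false if rq completed before we could hold it */
    }

    static void backend_unhold(struct intel_engine_cs *engine,
                               struct i915_request *rq)
    {
            /* Return rq and its suspended waiters to the priority queue */
            spin_lock_irq(&engine->active.lock);
            __intel_engine_unhold_request(engine, rq);
            spin_unlock_irq(&engine->active.lock);
    }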
 .../drm/i915/gt/intel_execlists_submission.c  | 148 +-----------------
 drivers/gpu/drm/i915/i915_scheduler.c         | 120 ++++++++++++++
 drivers/gpu/drm/i915/i915_scheduler.h         |   5 +
 3 files changed, 129 insertions(+), 144 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index ada2f64c6c8b..c2780b1c2fc8 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1860,166 +1860,26 @@ static void post_process_csb(struct i915_request **port,
                execlists_schedule_out(*port++);
 }
 
-static void __execlists_hold(struct i915_request *rq)
-{
-       LIST_HEAD(list);
-
-       do {
-               struct i915_dependency *p;
-
-               if (i915_request_is_active(rq))
-                       __i915_request_unsubmit(rq);
-
-               clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-               list_move_tail(&rq->sched.link, &rq->engine->active.hold);
-               i915_request_set_hold(rq);
-               RQ_TRACE(rq, "on hold\n");
-
-               for_each_waiter(p, rq) {
-                       struct i915_request *w =
-                               container_of(p->waiter, typeof(*w), sched);
-
-                       if (p->flags & I915_DEPENDENCY_WEAK)
-                               continue;
-
-                       /* Leave semaphores spinning on the other engines */
-                       if (w->engine != rq->engine)
-                               continue;
-
-                       if (!i915_request_is_ready(w))
-                               continue;
-
-                       if (__i915_request_is_complete(w))
-                               continue;
-
-                       if (i915_request_on_hold(w))
-                               continue;
-
-                       list_move_tail(&w->sched.link, &list);
-               }
-
-               rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
-       } while (rq);
-}
-
 static bool execlists_hold(struct intel_engine_cs *engine,
                           struct i915_request *rq)
 {
+       bool result;
+
        if (i915_request_on_hold(rq))
                return false;
 
        spin_lock_irq(&engine->active.lock);
-
-       if (__i915_request_is_complete(rq)) { /* too late! */
-               rq = NULL;
-               goto unlock;
-       }
-
-       /*
-        * Transfer this request onto the hold queue to prevent it
-        * being resumbitted to HW (and potentially completed) before we have
-        * released it. Since we may have already submitted following
-        * requests, we need to remove those as well.
-        */
-       GEM_BUG_ON(i915_request_on_hold(rq));
-       GEM_BUG_ON(rq->engine != engine);
-       __execlists_hold(rq);
-       GEM_BUG_ON(list_empty(&engine->active.hold));
-
-unlock:
+       result = __intel_engine_hold_request(engine, rq);
        spin_unlock_irq(&engine->active.lock);
-       return rq;
-}
-
-static bool hold_request(const struct i915_request *rq)
-{
-       struct i915_dependency *p;
-       bool result = false;
-
-       /*
-        * If one of our ancestors is on hold, we must also be on hold,
-        * otherwise we will bypass it and execute before it.
-        */
-       rcu_read_lock();
-       for_each_signaler(p, rq) {
-               const struct i915_request *s =
-                       container_of(p->signaler, typeof(*s), sched);
-
-               if (s->engine != rq->engine)
-                       continue;
-
-               result = i915_request_on_hold(s);
-               if (result)
-                       break;
-       }
-       rcu_read_unlock();
 
        return result;
 }
 
-static void __execlists_unhold(struct i915_request *rq)
-{
-       LIST_HEAD(list);
-
-       do {
-               struct i915_dependency *p;
-
-               RQ_TRACE(rq, "hold release\n");
-
-               GEM_BUG_ON(!i915_request_on_hold(rq));
-               GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
-
-               i915_request_clear_hold(rq);
-               list_move_tail(&rq->sched.link,
-                              i915_sched_lookup_priolist(rq->engine,
-                                                         rq_prio(rq)));
-               set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
-               /* Also release any children on this engine that are ready */
-               for_each_waiter(p, rq) {
-                       struct i915_request *w =
-                               container_of(p->waiter, typeof(*w), sched);
-
-                       if (p->flags & I915_DEPENDENCY_WEAK)
-                               continue;
-
-                       /* Propagate any change in error status */
-                       if (rq->fence.error)
-                               i915_request_set_error_once(w, rq->fence.error);
-
-                       if (w->engine != rq->engine)
-                               continue;
-
-                       if (!i915_request_on_hold(w))
-                               continue;
-
-                       /* Check that no other parents are also on hold */
-                       if (hold_request(w))
-                               continue;
-
-                       list_move_tail(&w->sched.link, &list);
-               }
-
-               rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
-       } while (rq);
-}
-
 static void execlists_unhold(struct intel_engine_cs *engine,
                             struct i915_request *rq)
 {
        spin_lock_irq(&engine->active.lock);
-
-       /*
-        * Move this request back to the priority queue, and all of its
-        * children and grandchildren that were suspended along with it.
-        */
-       __execlists_unhold(rq);
-
-       if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
-               engine->execlists.queue_priority_hint = rq_prio(rq);
-               tasklet_hi_schedule(&engine->execlists.tasklet);
-       }
-
+       __intel_engine_unhold_request(engine, rq);
        spin_unlock_irq(&engine->active.lock);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 24bb0d0c8ed0..6ad0acd96cd2 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -572,6 +572,126 @@ __intel_engine_rewind_requests(struct intel_engine_cs *engine)
        return active;
 }
 
+bool __intel_engine_hold_request(struct intel_engine_cs *engine,
+                                struct i915_request *rq)
+{
+       LIST_HEAD(list);
+
+       lockdep_assert_held(&engine->active.lock);
+       GEM_BUG_ON(i915_request_on_hold(rq));
+       GEM_BUG_ON(rq->engine != engine);
+
+       if (__i915_request_is_complete(rq)) /* too late! */
+               return false;
+
+       /*
+        * Transfer this request onto the hold queue to prevent it
+        * being resubmitted to HW (and potentially completed) before we have
+        * released it. Since we may have already submitted following
+        * requests, we need to remove those as well.
+        */
+       do {
+               struct i915_dependency *p;
+
+               if (i915_request_is_active(rq))
+                       __i915_request_unsubmit(rq);
+
+               clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+               list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+               i915_request_set_hold(rq);
+               RQ_TRACE(rq, "on hold\n");
+
+               for_each_waiter(p, rq) {
+                       struct i915_request *w =
+                               container_of(p->waiter, typeof(*w), sched);
+
+                       if (p->flags & I915_DEPENDENCY_WEAK)
+                               continue;
+
+                       /* Leave semaphores spinning on the other engines */
+                       if (w->engine != engine)
+                               continue;
+
+                       if (!i915_request_is_ready(w))
+                               continue;
+
+                       if (__i915_request_is_complete(w))
+                               continue;
+
+                       if (i915_request_on_hold(w)) /* acts as a visited bit */
+                               continue;
+
+                       list_move_tail(&w->sched.link, &list);
+               }
+
+               rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+       } while (rq);
+
+       GEM_BUG_ON(list_empty(&engine->active.hold));
+
+       return true;
+}
+
+void __intel_engine_unhold_request(struct intel_engine_cs *engine,
+                                  struct i915_request *rq)
+{
+       LIST_HEAD(list);
+
+       lockdep_assert_held(&engine->active.lock);
+
+       if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+               engine->execlists.queue_priority_hint = rq_prio(rq);
+               tasklet_hi_schedule(&engine->execlists.tasklet);
+       }
+
+       /*
+        * Move this request back to the priority queue, and all of its
+        * children and grandchildren that were suspended along with it.
+        */
+       do {
+               struct i915_dependency *p;
+
+               RQ_TRACE(rq, "hold release\n");
+
+               GEM_BUG_ON(!i915_request_on_hold(rq));
+               GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+               i915_request_clear_hold(rq);
+               list_move_tail(&rq->sched.link,
+                              i915_sched_lookup_priolist(rq->engine,
+                                                         rq_prio(rq)));
+               set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+               /* Also release any children on this engine that are ready */
+               for_each_waiter(p, rq) {
+                       struct i915_request *w =
+                               container_of(p->waiter, typeof(*w), sched);
+
+                       if (p->flags & I915_DEPENDENCY_WEAK)
+                               continue;
+
+                       /* Propagate any change in error status */
+                       if (rq->fence.error)
+                               i915_request_set_error_once(w, rq->fence.error);
+
+                       if (w->engine != engine)
+                               continue;
+
+                       /* We also treat the on-hold status as a visited bit */
+                       if (!i915_request_on_hold(w))
+                               continue;
+
+                       /* Check that no other parents are also on hold [BFS] */
+                       if (hold_request(w))
+                               continue;
+
+                       list_move_tail(&w->sched.link, &list);
+               }
+
+               rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+       } while (rq);
+}
+
 void i915_sched_node_init(struct i915_sched_node *node)
 {
        spin_lock_init(&node->lock);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 50fdc7168d38..b4b722982870 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -44,6 +44,11 @@ void i915_request_enqueue(struct i915_request *request);
 struct i915_request *
 __intel_engine_rewind_requests(struct intel_engine_cs *engine);
 
+bool __intel_engine_hold_request(struct intel_engine_cs *engine,
+                                struct i915_request *request);
+void __intel_engine_unhold_request(struct intel_engine_cs *engine,
+                                  struct i915_request *request);
+
 struct list_head *
 i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
 
-- 
2.20.1
