Since we process schedule-in of a context after submitting the request,
if we decide to reset the context at that point, we must also cancel
the requests we have already marked for submission.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 .../drm/i915/gt/intel_execlists_submission.c  | 22 +++++++++++++++++--
 drivers/gpu/drm/i915/i915_request.c           |  2 ++
 2 files changed, 22 insertions(+), 2 deletions(-)
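
For reviewers: below is a minimal userspace sketch (not kernel code, purely
illustrative, with hypothetical names) of the reverse walk the new
cancel_requests() helper performs. A plain array stands in for the timeline's
request list, a 'complete' flag stands in for __i915_request_is_complete(),
and setting -EIO models i915_request_set_error_once() plus
__i915_request_skip(). The walk starts at the request handed to reset_active(),
moves towards older requests until it meets one that has already completed,
and returns the oldest still-pending request, whose head the reset replays from.

/* Illustrative sketch only; names and types are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

#define EIO 5

struct fake_request {
	unsigned int seqno;
	bool complete;
	int error;
	unsigned int head;	/* ring offset to replay from */
};

/*
 * Walk backwards from @idx (the request being scheduled in) towards
 * older requests, marking every incomplete request as cancelled and
 * returning the oldest one still pending -- the request whose head
 * the context image should be rewound to.
 */
static struct fake_request *
fake_cancel_requests(struct fake_request *tl, int idx)
{
	struct fake_request *active = &tl[idx];

	for (; idx >= 0; idx--) {
		if (tl[idx].complete)
			break;

		if (!tl[idx].error)	/* models "set error once" */
			tl[idx].error = -EIO;
		active = &tl[idx];
	}

	return active;
}

int main(void)
{
	struct fake_request tl[] = {
		{ .seqno = 1, .complete = true,  .head = 0x000 },
		{ .seqno = 2, .complete = false, .head = 0x100 },
		{ .seqno = 3, .complete = false, .head = 0x200 },
	};
	struct fake_request *active = fake_cancel_requests(tl, 2);

	/* Expect seqno 2: the oldest request that had not completed. */
	printf("replay from seqno %u, head 0x%x\n",
	       active->seqno, active->head);
	return 0;
}

The kernel version does the same walk over ce->timeline->requests with
list_for_each_entry_from_reverse(), which is why reset_active() below can use
rq->head directly instead of calling active_request().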

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 1fae6c6f3868..2123d9566061 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -466,6 +466,23 @@ static void intel_engine_context_out(struct intel_engine_cs *engine)
        write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
+static struct i915_request *
+cancel_requests(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+       struct i915_request *active = rq;
+
+       list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+               if (__i915_request_is_complete(rq))
+                       break;
+
+               i915_request_set_error_once(rq, -EIO);
+               __i915_request_skip(rq);
+               active = rq;
+       }
+
+       return active;
+}
+
 static void reset_active(struct i915_request *rq,
                         struct intel_engine_cs *engine)
 {
@@ -487,14 +504,15 @@ static void reset_active(struct i915_request *rq,
         * remain correctly ordered. And we defer to __i915_request_submit()
         * so that all asynchronous waits are correctly handled.
         */
-       ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
+       rq = cancel_requests(ce->timeline, rq);
+       ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
                     rq->fence.context, rq->fence.seqno);
 
        /* On resubmission of the active request, payload will be scrubbed */
        if (__i915_request_is_complete(rq))
                head = rq->tail;
        else
-               head = active_request(ce->timeline, rq)->head;
+               head = rq->head;
        head = intel_ring_wrap(ce->ring, head);
 
        /* Scrub the context image to prevent replaying the previous batch */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 6578faf6eed8..ad3b6a4f424f 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -490,6 +490,8 @@ void __i915_request_skip(struct i915_request *rq)
        if (rq->infix == rq->postfix)
                return;
 
+       RQ_TRACE(rq, "error: %d\n", rq->fence.error);
+
        /*
         * As this request likely depends on state from the lost
         * context, clear out all the user operations leaving the
-- 
2.20.1
