[Intel-gfx] [PATCH] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context

2020-03-27 Thread Chris Wilson
In what seems remarkably similar to the w/a required to not reload an
idle context with HEAD==TAIL, it appears we must prevent the HW from
switching to an idle context in ELSP[1], while simultaneously trying to
preempt the HW to run another context and a continuation of the idle
context (which is no longer idle).

  process_csb: vecs0: cs-irq head=0, tail=1
  process_csb: vecs0: csb[1]: status=0x0882:0x0020
  trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
  trace_ports: vecs0: promote { 8b2:32!, 8c0:30 }
  trace_ports: vecs0: submit { 8b8:32, 8c0:32 }
  process_csb: vecs0: cs-irq head=1, tail=2
  process_csb: vecs0: csb[2]: status=0x0814:0x0040
  trace_ports: vecs0: completed { 8b2:32!, 8c0:30 }
  process_csb: vecs0: cs-irq head=2, tail=5
  process_csb: vecs0: csb[3]: status=0x0812:0x0020
  trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
  trace_ports: vecs0: promote { 8b8:32!, 8c0:32 }
  process_csb: vecs0: csb[4]: status=0x0814:0x0060
  trace_ports: vecs0: completed { 8b8:32!, 8c0:32 }
  process_csb: vecs0: csb[5]: status=0x0818:0x0020
  trace_ports: vecs0: completed { 8c0:32, 0:0 }
  process_csb: vecs0: ring:{start:0x00021000, head:03f8, tail:03f8, 
ctl:, mode:0200}
  process_csb: vecs0: rq:{start:00021000, head:03c0, tail:0400, seqno:8c0:32, 
hwsp:30},
  process_csb: vecs0: ctx:{start:00021000, head:03f8, tail:03f8},
  process_csb: GEM_BUG_ON("context completed before request")

Fortunately, we just so happen to have a semaphore in place to prevent
the ring HEAD from proceeding past the end of a request that we can use
to fix the HEAD in position as we reprogram ELSP.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
Signed-off-by: Chris Wilson 
Cc: Tvrtko Ursulin 
Cc: Mika Kuoppala 
---
 drivers/gpu/drm/i915/gt/intel_gt_irq.c |  3 ++
 drivers/gpu/drm/i915/gt/intel_lrc.c| 71 +-
 2 files changed, 38 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c 
b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f0e7fd95165a..3afe05558816 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -24,6 +24,9 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 {
bool tasklet = false;
 
+   if (engine && iir & 0x)
+   ENGINE_TRACE(engine, "iir: %04x\n", iir);
+
if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
u32 eir;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c 
b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b12355048501..9e24ff7451a9 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request 
**ports, int count)
memset_p((void **)ports, NULL, count);
 }
 
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static bool execlists_dequeue(struct intel_engine_cs *engine)
 {
	struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
@@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 execlists->queue_priority_hint);
record_preemption(execlists);
 
-   /*
-* Don't let the RING_HEAD advance past the breadcrumb
-* as we unwind (and until we resubmit) so that we do
-* not accidentally tell it to go backwards.
-*/
-   ring_set_paused(engine, 1);
-
/*
 * Note that we have not stopped the GPU at this point,
 * so we are unwinding the incomplete requests as they
@@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 last->sched.attr.priority,
 execlists->queue_priority_hint);
 
-   ring_set_paused(engine, 1);
defer_active(engine);
 
/*
@@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 * of timeslices, our queue might be.
 */
start_timeslice(engine);
-   return;
+   return false;
}
}
}
@@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
}
 
if (last && !can_merge_rq(last, rq)) {
+   /* leave this for another sibling */
	spin_unlock(&ve->base.active.lock);
start_timeslice(engine);
-   return; /* leave this for another sibling 

[Intel-gfx] [PATCH] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context

2020-03-27 Thread Chris Wilson
In what seems remarkably similar to the w/a required to not reload an
idle context with HEAD==TAIL, it appears we must prevent the HW from
switching to an idle context in ELSP[1], while simultaneously trying to
preempt the HW to run another context and a continuation of the idle
context (which is no longer idle).

  process_csb: vecs0: cs-irq head=0, tail=1
  process_csb: vecs0: csb[1]: status=0x0882:0x0020
  trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
  trace_ports: vecs0: promote { 8b2:32!, 8c0:30 }
  trace_ports: vecs0: submit { 8b8:32, 8c0:32 }
  process_csb: vecs0: cs-irq head=1, tail=2
  process_csb: vecs0: csb[2]: status=0x0814:0x0040
  trace_ports: vecs0: completed { 8b2:32!, 8c0:30 }
  process_csb: vecs0: cs-irq head=2, tail=5
  process_csb: vecs0: csb[3]: status=0x0812:0x0020
  trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
  trace_ports: vecs0: promote { 8b8:32!, 8c0:32 }
  process_csb: vecs0: csb[4]: status=0x0814:0x0060
  trace_ports: vecs0: completed { 8b8:32!, 8c0:32 }
  process_csb: vecs0: csb[5]: status=0x0818:0x0020
  trace_ports: vecs0: completed { 8c0:32, 0:0 }
  process_csb: vecs0: ring:{start:0x00021000, head:03f8, tail:03f8, 
ctl:, mode:0200}
  process_csb: vecs0: rq:{start:00021000, head:03c0, tail:0400, seqno:8c0:32, 
hwsp:30},
  process_csb: vecs0: ctx:{start:00021000, head:03f8, tail:03f8},
  process_csb: GEM_BUG_ON("context completed before request")

Fortunately, we just so happen to have a semaphore in place to prevent
the ring HEAD from proceeding past the end of a request that we can use
to fix the HEAD in position as we reprogram ELSP.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
Signed-off-by: Chris Wilson 
Cc: Tvrtko Ursulin 
Cc: Mika Kuoppala 
---
 drivers/gpu/drm/i915/gt/intel_gt_irq.c |  3 ++
 drivers/gpu/drm/i915/gt/intel_lrc.c| 71 +-
 2 files changed, 38 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c 
b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f0e7fd95165a..06bdc48c60d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -24,6 +24,9 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 {
bool tasklet = false;
 
+   if (engine)
+   ENGINE_TRACE(engine, "iir: %04x\n", iir);
+
if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
u32 eir;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c 
b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b12355048501..9e24ff7451a9 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request 
**ports, int count)
memset_p((void **)ports, NULL, count);
 }
 
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static bool execlists_dequeue(struct intel_engine_cs *engine)
 {
	struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
@@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 execlists->queue_priority_hint);
record_preemption(execlists);
 
-   /*
-* Don't let the RING_HEAD advance past the breadcrumb
-* as we unwind (and until we resubmit) so that we do
-* not accidentally tell it to go backwards.
-*/
-   ring_set_paused(engine, 1);
-
/*
 * Note that we have not stopped the GPU at this point,
 * so we are unwinding the incomplete requests as they
@@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 last->sched.attr.priority,
 execlists->queue_priority_hint);
 
-   ring_set_paused(engine, 1);
defer_active(engine);
 
/*
@@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 * of timeslices, our queue might be.
 */
start_timeslice(engine);
-   return;
+   return false;
}
}
}
@@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
}
 
if (last && !can_merge_rq(last, rq)) {
+   /* leave this for another sibling */
	spin_unlock(&ve->base.active.lock);
start_timeslice(engine);
-   return; /* leave this for another sibling */
+  

[Intel-gfx] [PATCH] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context

2020-03-26 Thread Chris Wilson
In what seems remarkably similar to the w/a required to not reload an
idle context with HEAD==TAIL, it appears we must prevent the HW from
switching to an idle context in ELSP[1], while simultaneously trying to
preempt the HW to run another context and a continuation of the idle
context (which is no longer idle).

  process_csb: vecs0: cs-irq head=0, tail=1
  process_csb: vecs0: csb[1]: status=0x0882:0x0020
  trace_ports: vecs0: promote { 8b2:32!, 8c0:30 }
  trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
  trace_ports: vecs0: submit { 8b8:32, 8c0:32 }
  process_csb: vecs0: cs-irq head=1, tail=2
  process_csb: vecs0: csb[2]: status=0x0814:0x0040
  trace_ports: vecs0: completed { 8b2:32!, 8c0:30 }
  process_csb: vecs0: cs-irq head=2, tail=5
  process_csb: vecs0: csb[3]: status=0x0812:0x0020
  trace_ports: vecs0: promote { 8b8:32!, 8c0:32 }
  trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
  process_csb: vecs0: csb[4]: status=0x0814:0x0060
  trace_ports: vecs0: completed { 8b8:32!, 8c0:32 }
  process_csb: vecs0: csb[5]: status=0x0818:0x0020
  trace_ports: vecs0: completed { 8c0:32, 0:0 }
  process_csb: vecs0: ring:{start:0x00021000, head:03f8, tail:03f8, 
ctl:, mode:0200}
  process_csb: vecs0: rq:{start:00021000, head:03c0, tail:0400, seqno:8c0:32, 
hwsp:30},
  process_csb: vecs0: ctx:{start:00021000, head:03f8, tail:03f8},
  process_csb: process_csb:2449 GEM_BUG_ON("context completed before request")

Fortunately, we just so happen to have a semaphore in place to prevent
the ring HEAD from proceeding past the end of a request that we can use
to fix the HEAD in position as we reprogram ELSP.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
Signed-off-by: Chris Wilson 
Cc: Tvrtko Ursulin 
Cc: Mika Kuoppala 
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 71 ++---
 1 file changed, 35 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c 
b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b12355048501..9e24ff7451a9 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request 
**ports, int count)
memset_p((void **)ports, NULL, count);
 }
 
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static bool execlists_dequeue(struct intel_engine_cs *engine)
 {
	struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
@@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 execlists->queue_priority_hint);
record_preemption(execlists);
 
-   /*
-* Don't let the RING_HEAD advance past the breadcrumb
-* as we unwind (and until we resubmit) so that we do
-* not accidentally tell it to go backwards.
-*/
-   ring_set_paused(engine, 1);
-
/*
 * Note that we have not stopped the GPU at this point,
 * so we are unwinding the incomplete requests as they
@@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 last->sched.attr.priority,
 execlists->queue_priority_hint);
 
-   ring_set_paused(engine, 1);
defer_active(engine);
 
/*
@@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 * of timeslices, our queue might be.
 */
start_timeslice(engine);
-   return;
+   return false;
}
}
}
@@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
}
 
if (last && !can_merge_rq(last, rq)) {
+   /* leave this for another sibling */
	spin_unlock(&ve->base.active.lock);
start_timeslice(engine);
-   return; /* leave this for another sibling */
+   return false;
}
 
ENGINE_TRACE(engine,
@@ -2193,32 +2186,30 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 * interrupt for secondary ports).
 */
execlists->queue_priority_hint = queue_prio(execlists);
+   if (!submit)
+   return false;
 
-   if (submit) {
-   *port = execlists_schedule_in(last, port - execlists->pending);
-   execlists->switch_priority_hint =
-

[Intel-gfx] [PATCH] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context

2020-03-26 Thread Chris Wilson
In what seems remarkably similar to the w/a required to not reload an
idle context with HEAD==TAIL, it appears we must prevent the HW from
switching to an idle context in ELSP[1], while simultaneously trying to
preempt the HW to run another context and a continuation of the idle
context (which is no longer idle).

Fortunately, we just so happen to have a semaphore in place to prevent
the ring HEAD from proceeding past the end of a request that we can use
to fix the HEAD in position as we reprogram ELSP.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
Signed-off-by: Chris Wilson 
Cc: Tvrtko Ursulin 
Cc: Mika Kuoppala 
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 71 ++---
 1 file changed, 35 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c 
b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b12355048501..9e24ff7451a9 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request 
**ports, int count)
memset_p((void **)ports, NULL, count);
 }
 
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static bool execlists_dequeue(struct intel_engine_cs *engine)
 {
	struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
@@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 execlists->queue_priority_hint);
record_preemption(execlists);
 
-   /*
-* Don't let the RING_HEAD advance past the breadcrumb
-* as we unwind (and until we resubmit) so that we do
-* not accidentally tell it to go backwards.
-*/
-   ring_set_paused(engine, 1);
-
/*
 * Note that we have not stopped the GPU at this point,
 * so we are unwinding the incomplete requests as they
@@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 last->sched.attr.priority,
 execlists->queue_priority_hint);
 
-   ring_set_paused(engine, 1);
defer_active(engine);
 
/*
@@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 * of timeslices, our queue might be.
 */
start_timeslice(engine);
-   return;
+   return false;
}
}
}
@@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
}
 
if (last && !can_merge_rq(last, rq)) {
+   /* leave this for another sibling */
	spin_unlock(&ve->base.active.lock);
start_timeslice(engine);
-   return; /* leave this for another sibling */
+   return false;
}
 
ENGINE_TRACE(engine,
@@ -2193,32 +2186,30 @@ static void execlists_dequeue(struct intel_engine_cs 
*engine)
 * interrupt for secondary ports).
 */
execlists->queue_priority_hint = queue_prio(execlists);
+   if (!submit)
+   return false;
 
-   if (submit) {
-   *port = execlists_schedule_in(last, port - execlists->pending);
-   execlists->switch_priority_hint =
-   switch_prio(engine, *execlists->pending);
+   *port = execlists_schedule_in(last, port - execlists->pending);
+   execlists->switch_priority_hint =
+   switch_prio(engine, *execlists->pending);
 
-   /*
-* Skip if we ended up with exactly the same set of requests,
-* e.g. trying to timeslice a pair of ordered contexts
-*/
-   if (!memcmp(active, execlists->pending,
-   (port - execlists->pending + 1) * sizeof(*port))) {
-   do
-   execlists_schedule_out(fetch_and_zero(port));
-   while (port-- != execlists->pending);
-
-   goto skip_submit;
-   }
-   clear_ports(port + 1, last_port - port);
+   /*
+* Skip if we ended up with exactly the same set of requests,
+* e.g. trying to timeslice a pair of ordered contexts
+*/
+   if (!memcmp(active, execlists->pending,
+   (port - execlists->pending + 1) * sizeof(*port))) {
+   do
+