In the next patch, we will begin processing the CSB from inside the interrupt handler. This means that updates to execlists->port[] will no longer be serialised by the tasklet but will need to be protected by engine->timeline.lock instead. Pull dequeue and submit under the same lock for protection. (An alternative, future plan is to keep the in/out arrays separate for concurrent processing and reduced lock coverage.)
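As a rough illustration (not the driver code itself), the change amounts to the following user-space C sketch, where pthread locking stands in for the kernel's spin_lock_irqsave() and dequeue_ports()/submit_ports() are hypothetical stand-ins for __execlists_dequeue()/execlists_submit_ports():

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t timeline_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stubs for illustration only. */
static bool dequeue_ports(void) { return true; }	/* fill port[] from the queue */
static void submit_ports(void) { }			/* write port[] to the hardware */

/* Before: submit ran outside the lock. Once an interrupt handler also
 * updates port[] under timeline_lock, it could observe (or overwrite)
 * a half-constructed port[] between the unlock and the submit. */
static void dequeue_then_submit_old(void)
{
	bool submit;

	pthread_mutex_lock(&timeline_lock);
	submit = dequeue_ports();
	pthread_mutex_unlock(&timeline_lock);

	if (submit)
		submit_ports();
}

/* After: dequeue and submit form one critical section, so port[] is
 * always consistent whenever the lock is dropped. */
static void dequeue_then_submit_new(void)
{
	pthread_mutex_lock(&timeline_lock);
	if (dequeue_ports())
		submit_ports();
	pthread_mutex_unlock(&timeline_lock);
}

int main(void)
{
	dequeue_then_submit_old();
	dequeue_then_submit_new();
	return 0;
}

The cost is a slightly longer critical section, which the bracketed note above proposes to win back later by splitting the in/out arrays.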
Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_lrc.c | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ea85e2a50b83..703135637798 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -562,7 +562,7 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists)
 	execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
 }
 
-static bool __execlists_dequeue(struct intel_engine_cs *engine)
+static void __execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct execlist_port *port = execlists->port;
@@ -617,11 +617,11 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
 	 * the HW to indicate that it has had a chance to respond.
 	 */
 	if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
-		return false;
+		return;
 
 	if (need_preempt(engine, last, execlists->queue_priority)) {
 		inject_preempt_context(engine);
-		return false;
+		return;
 	}
 
 	/*
@@ -646,7 +646,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
 	 * priorities of the ports haven't been switch.
 	 */
 	if (port_count(&port[1]))
-		return false;
+		return;
 
 	/*
 	 * WaIdleLiteRestore:bdw,skl
@@ -746,8 +746,10 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
 		port != execlists->port ? rq_prio(last) : INT_MIN;
 
 	execlists->first = rb;
-	if (submit)
+	if (submit) {
 		port_assign(port, last);
+		execlists_submit_ports(engine);
+	}
 
 	/* We must always keep the beast fed if we have work piled up */
 	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
@@ -756,24 +758,19 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
 	if (last)
 		execlists_user_begin(execlists, execlists->port);
 
-	return submit;
+	/* If the engine is now idle, so should be the flag; and vice versa. */
+	GEM_BUG_ON(execlists_is_active(&engine->execlists,
+				       EXECLISTS_ACTIVE_USER) ==
+		   !port_isset(engine->execlists.port));
 }
 
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
-	struct intel_engine_execlists * const execlists = &engine->execlists;
 	unsigned long flags;
-	bool submit;
 
 	spin_lock_irqsave(&engine->timeline.lock, flags);
-	submit = __execlists_dequeue(engine);
+	__execlists_dequeue(engine);
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
-	if (submit)
-		execlists_submit_ports(engine);
-
-	GEM_BUG_ON(port_isset(execlists->port) &&
-		   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
 }
 
 void
@@ -1156,11 +1153,6 @@ static void execlists_submission_tasklet(unsigned long data)
 	if (!execlists_is_active(&engine->execlists,
 				 EXECLISTS_ACTIVE_PREEMPT))
 		execlists_dequeue(engine);
-
-	/* If the engine is now idle, so should be the flag; and vice versa. */
-	GEM_BUG_ON(execlists_is_active(&engine->execlists,
-				       EXECLISTS_ACTIVE_USER) ==
-		   !port_isset(engine->execlists.port));
 }
 
 static void queue_request(struct intel_engine_cs *engine,
-- 
2.17.0