If we are resubmitting the active context, simply skip the submission, as performing the submission from the interrupt handler has higher throughput than continually provoking lite-restores. If, however, we find ourselves with a new client, we check whether or not we can dequeue into the second port or need to resolve preemption.
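
As a rough illustration (not part of the patch itself), the new check boils down to the condition sketched below. The struct definitions and the should_submit() helper are simplified user-space stand-ins for the i915 types, shown only to make the two cases explicit.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the i915 structures used by the real change. */
struct hw_context { int id; };
struct request { int prio; struct hw_context *ctx; };
struct engine { struct request *active; int queue_priority_hint; };

/* Mirrors inflight(): is this request for the context already on the port? */
static bool inflight(const struct engine *e, const struct request *rq)
{
	return e->active && e->active->ctx == rq->ctx;
}

/* Mirrors the submit_queue() condition: only kick direct submission for a
 * new, higher-priority client; leave resubmission of the active context to
 * the interrupt handler.
 */
static bool should_submit(const struct engine *e, const struct request *rq)
{
	return rq->prio > e->queue_priority_hint && !inflight(e, rq);
}

int main(void)
{
	struct hw_context a = { 1 }, b = { 2 };
	struct request active = { 0, &a };
	struct engine e = { &active, 0 };
	struct request same_ctx = { 1, &a }, new_client = { 1, &b };

	/* Resubmitting the active context: skip, avoid provoking lite-restores. */
	printf("same context: %d\n", should_submit(&e, &same_ctx));   /* 0 */
	/* New client: may need the second port or preemption, submit now. */
	printf("new client:   %d\n", should_submit(&e, &new_client)); /* 1 */
	return 0;
}
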
Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_lrc.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5e9cff19a6b6..e2e219156fbf 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1205,10 +1205,22 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
 		tasklet_hi_schedule(&execlists->tasklet);
 }
 
-static void submit_queue(struct intel_engine_cs *engine, int prio)
+static bool inflight(const struct intel_engine_execlists *execlists,
+		     const struct i915_request *rq)
 {
-	if (prio > engine->execlists.queue_priority_hint) {
-		engine->execlists.queue_priority_hint = prio;
+	const struct i915_request *active = port_request(execlists->port);
+
+	return active && active->hw_context == rq->hw_context;
+}
+
+static void submit_queue(struct intel_engine_cs *engine,
+			 const struct i915_request *rq)
+{
+	struct intel_engine_execlists *execlists = &engine->execlists;
+
+	if (rq_prio(rq) > execlists->queue_priority_hint &&
+	    !inflight(execlists, rq)) {
+		execlists->queue_priority_hint = rq_prio(rq);
 		__submit_queue_imm(engine);
 	}
 }
@@ -1226,7 +1238,7 @@ static void execlists_submit_request(struct i915_request *request)
 	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 	GEM_BUG_ON(list_empty(&request->sched.link));
 
-	submit_queue(engine, rq_prio(request));
+	submit_queue(engine, request);
 
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
-- 
2.20.1