We'd like to reuse the priolist lookup in the request resubmission path;
let's split insert_request to make that happen.
Cc: Chris Wilson
Cc: Joonas Lahtinen
Cc: Tvrtko Ursulin
Signed-off-by: Michał Winiarski
---
drivers/gpu/drm/i915/intel_lrc.c | 65
1 file changed, 39 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 1255724..c43ca1b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -627,20 +627,15 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
}
-static bool
-insert_request(struct intel_engine_cs *engine,
- struct i915_priotree *pt,
- int prio)
+static struct i915_priolist *
+priolist_lookup(struct intel_engine_cs *engine, int prio, bool *first)
{
struct i915_priolist *p;
struct rb_node **parent, *rb;
- bool first = true;
if (unlikely(engine->no_priolist))
prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
- /* most positive priority is scheduled first, equal priorities fifo */
+ *first = true;
rb = NULL;
parent = &engine->execlist_queue.rb_node;
while (*parent) {
@@ -650,10 +645,10 @@ insert_request(struct intel_engine_cs *engine,
parent = &rb->rb_left;
} else if (prio < p->priority) {
parent = &rb->rb_right;
- first = false;
+ *first = false;
} else {
- list_add_tail(&pt->link, &p->requests);
- return false;
+ *first = false;
+ return p;
}
}
@@ -661,20 +656,10 @@ insert_request(struct intel_engine_cs *engine,
p = &engine->default_priolist;
} else {
p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
- /* Convert an allocation failure to a priority bump */
+
if (unlikely(!p)) {
- prio = I915_PRIORITY_NORMAL; /* recurses just once */
-
- /* To maintain ordering with all rendering, after an
-* allocation failure we have to disable all scheduling.
-* Requests will then be executed in fifo, and schedule
-* will ensure that dependencies are emitted in fifo.
-* There will be still some reordering with existing
-* requests, so if userspace lied about their
-* dependencies that reordering may be visible.
-*/
- engine->no_priolist = true;
- goto find_priolist;
+ *first = false;
+ return ERR_PTR(-ENOMEM);
}
}
@@ -683,11 +668,39 @@ insert_request(struct intel_engine_cs *engine,
rb_insert_color(&p->node, &engine->execlist_queue);
INIT_LIST_HEAD(&p->requests);
- list_add_tail(&pt->link, &p->requests);
- if (first)
+ if (*first)
engine->execlist_first = &p->node;
+ return p;
+}
+
+static bool
+insert_request(struct intel_engine_cs *engine,
+ struct i915_priotree *pt,
+ int prio)
+{
+ struct i915_priolist *p;
+ bool first = false;
+
+ p = priolist_lookup(engine, prio, &first);
+
+ if (unlikely(IS_ERR(p))) {
+ /* To maintain ordering with all rendering, after an
+* allocation failure we have to disable all scheduling.
+* Requests will then be executed in fifo, and schedule
+* will ensure that dependencies are emitted in fifo.
+* There will be still some reordering with existing
+* requests, so if userspace lied about their
+* dependencies that reordering may be visible.
+*/
+ engine->no_priolist = true;
+ p = priolist_lookup(engine, I915_PRIORITY_NORMAL, &first);
+ }
+
+ /* most positive priority is scheduled first, equal priorities fifo */
+ list_add_tail(&pt->link, &p->requests);
+
return first;
}
--
2.9.4
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx