[Intel-gfx] [PATCH 18/27] drm/i915/guc: Release submit fence from an irq_work

2021-08-25 Thread Matthew Brost
A subsequent patch will flip the locking hierarchy from
ce->guc_state.lock -> sched_engine->lock to sched_engine->lock ->
ce->guc_state.lock. As such we need to release the submit fence for a
request from an IRQ to break a lock inversion - i.e. the fence must be
released while holding ce->guc_state.lock and the release of the fence can
acquire sched_engine->lock.

v2:
 (Daniele)
  - Delete request from list before calling irq_work_queue

Reviewed-by: Daniele Ceraolo Spurio 
Signed-off-by: Matthew Brost 
---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 22 ---
 drivers/gpu/drm/i915/i915_request.h   |  5 +
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index baf789f37d42..c86aae0899e5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2035,17 +2035,32 @@ static const struct intel_context_ops guc_context_ops = 
{
.create_virtual = guc_create_virtual,
 };
 
+static void submit_work_cb(struct irq_work *wrk)
+{
+   struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+   might_lock(&rq->engine->sched_engine->lock);
+   i915_sw_fence_complete(&rq->submit);
+}
+
 static void __guc_signal_context_fence(struct intel_context *ce)
 {
-   struct i915_request *rq;
+   struct i915_request *rq, *rn;
 
lockdep_assert_held(&ce->guc_state.lock);
 
if (!list_empty(&ce->guc_state.fences))
trace_intel_context_fence_release(ce);
 
-   list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
-   i915_sw_fence_complete(&rq->submit);
+   /*
+* Use an IRQ to ensure locking order of sched_engine->lock ->
+* ce->guc_state.lock is preserved.
+*/
+   list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
+guc_fence_link) {
+   list_del(&rq->guc_fence_link);
+   irq_work_queue(&rq->submit_work);
+   }
 
INIT_LIST_HEAD(&ce->guc_state.fences);
 }
@@ -2155,6 +2170,7 @@ static int guc_request_alloc(struct i915_request *rq)
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (context_wait_for_deregister_to_register(ce) ||
context_pending_disable(ce)) {
+   init_irq_work(&rq->submit_work, submit_work_cb);
i915_sw_fence_await(&rq->submit);
 
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
diff --git a/drivers/gpu/drm/i915/i915_request.h 
b/drivers/gpu/drm/i915/i915_request.h
index 1bc1349ba3c2..d818cfbfc41d 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -218,6 +218,11 @@ struct i915_request {
};
struct llist_head execute_cb;
struct i915_sw_fence semaphore;
+   /**
+* @submit_work: complete submit fence from an IRQ if needed for
+* locking hierarchy reasons.
+*/
+   struct irq_work submit_work;
 
/*
 * A list of everyone we wait upon, and everyone who waits upon us.
-- 
2.32.0



Re: [Intel-gfx] [PATCH 18/27] drm/i915/guc: Release submit fence from an irq_work

2021-08-24 Thread Daniele Ceraolo Spurio




On 8/18/2021 11:16 PM, Matthew Brost wrote:

A subsequent patch will flip the locking hierarchy from
ce->guc_state.lock -> sched_engine->lock to sched_engine->lock ->
ce->guc_state.lock. As such we need to release the submit fence for a
request from an IRQ to break a lock inversion - i.e. the fence must be
released while holding ce->guc_state.lock and the release of the fence can
acquire sched_engine->lock.

Signed-off-by: Matthew Brost 
---
  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 15 ++-
  drivers/gpu/drm/i915/i915_request.h   |  5 +
  2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9a53bae367b1..deb2e821e441 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2025,6 +2025,14 @@ static const struct intel_context_ops guc_context_ops = {
.create_virtual = guc_create_virtual,
  };
  
+static void submit_work_cb(struct irq_work *wrk)

+{
+   struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+   might_lock(&rq->engine->sched_engine->lock);
+   i915_sw_fence_complete(&rq->submit);
+}
+
  static void __guc_signal_context_fence(struct intel_context *ce)
  {
struct i915_request *rq;
@@ -2034,8 +2042,12 @@ static void __guc_signal_context_fence(struct 
intel_context *ce)
if (!list_empty(&ce->guc_state.fences))
trace_intel_context_fence_release(ce);
  
+	/*

+* Use an IRQ to ensure locking order of sched_engine->lock ->
+* ce->guc_state.lock is preserved.
+*/
list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
-   i915_sw_fence_complete(&rq->submit);
+   irq_work_queue(&rq->submit_work);


I think we should clear rq->guc_fence_link before queueing the work, 
just to make sure the work can't interfere back to this list (I know we 
don't now, it's just for future proofing paranoia). with that:


Reviewed-by: Daniele Ceraolo Spurio 

Daniele

  
  	INIT_LIST_HEAD(&ce->guc_state.fences);

  }
@@ -2145,6 +2157,7 @@ static int guc_request_alloc(struct i915_request *rq)
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (context_wait_for_deregister_to_register(ce) ||
context_pending_disable(ce)) {
+   init_irq_work(&rq->submit_work, submit_work_cb);
i915_sw_fence_await(&rq->submit);
  
  		list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);

diff --git a/drivers/gpu/drm/i915/i915_request.h 
b/drivers/gpu/drm/i915/i915_request.h
index 1bc1349ba3c2..d818cfbfc41d 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -218,6 +218,11 @@ struct i915_request {
};
struct llist_head execute_cb;
struct i915_sw_fence semaphore;
+   /**
+* @submit_work: complete submit fence from an IRQ if needed for
+* locking hierarchy reasons.
+*/
+   struct irq_work submit_work;
  
  	/*

 * A list of everyone we wait upon, and everyone who waits upon us.




[Intel-gfx] [PATCH 18/27] drm/i915/guc: Release submit fence from an irq_work

2021-08-18 Thread Matthew Brost
A subsequent patch will flip the locking hierarchy from
ce->guc_state.lock -> sched_engine->lock to sched_engine->lock ->
ce->guc_state.lock. As such we need to release the submit fence for a
request from an IRQ to break a lock inversion - i.e. the fence must be
released while holding ce->guc_state.lock and the release of the fence can
acquire sched_engine->lock.

Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 15 ++-
 drivers/gpu/drm/i915/i915_request.h   |  5 +
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9a53bae367b1..deb2e821e441 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2025,6 +2025,14 @@ static const struct intel_context_ops guc_context_ops = {
.create_virtual = guc_create_virtual,
 };
 
+static void submit_work_cb(struct irq_work *wrk)
+{
+   struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+   might_lock(&rq->engine->sched_engine->lock);
+   i915_sw_fence_complete(&rq->submit);
+}
+
 static void __guc_signal_context_fence(struct intel_context *ce)
 {
struct i915_request *rq;
@@ -2034,8 +2042,12 @@ static void __guc_signal_context_fence(struct 
intel_context *ce)
if (!list_empty(&ce->guc_state.fences))
trace_intel_context_fence_release(ce);
 
+   /*
+* Use an IRQ to ensure locking order of sched_engine->lock ->
+* ce->guc_state.lock is preserved.
+*/
list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
-   i915_sw_fence_complete(&rq->submit);
+   irq_work_queue(&rq->submit_work);
 
INIT_LIST_HEAD(&ce->guc_state.fences);
 }
@@ -2145,6 +2157,7 @@ static int guc_request_alloc(struct i915_request *rq)
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (context_wait_for_deregister_to_register(ce) ||
context_pending_disable(ce)) {
+   init_irq_work(&rq->submit_work, submit_work_cb);
i915_sw_fence_await(&rq->submit);
 
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
diff --git a/drivers/gpu/drm/i915/i915_request.h 
b/drivers/gpu/drm/i915/i915_request.h
index 1bc1349ba3c2..d818cfbfc41d 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -218,6 +218,11 @@ struct i915_request {
};
struct llist_head execute_cb;
struct i915_sw_fence semaphore;
+   /**
+* @submit_work: complete submit fence from an IRQ if needed for
+* locking hierarchy reasons.
+*/
+   struct irq_work submit_work;
 
/*
 * A list of everyone we wait upon, and everyone who waits upon us.
-- 
2.32.0