[Intel-gfx] [PATCH 14/23] drm/i915: Convert i915_gem_object/client_blt.c to use ww locking as well, v2.

2020-03-31 Thread Maarten Lankhorst
This is the last part outside of selftests that still doesn't use the
correct lock ordering of timeline->mutex vs resv_lock.

With gem fixed, there are a few places that still get locking wrong:
- gvt/scheduler.c
- i915_perf.c
- Most if not all selftests.

Changes since v1:
- Add intel_engine_pm_get/put() calls to fix use-after-free when using
  intel_engine_get_pool().

Signed-off-by: Maarten Lankhorst 
---
 .../gpu/drm/i915/gem/i915_gem_client_blt.c|  80 +++--
 .../gpu/drm/i915/gem/i915_gem_object_blt.c| 156 +++---
 .../gpu/drm/i915/gem/i915_gem_object_blt.h|   3 +
 3 files changed, 165 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 5d94a77f9bdd..10df576e785f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -157,6 +157,7 @@ static void clear_pages_worker(struct work_struct *work)
struct clear_pages_work *w = container_of(work, typeof(*w), work);
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
+   struct i915_gem_ww_ctx ww;
struct i915_request *rq;
struct i915_vma *batch;
int err = w->dma.error;
@@ -172,17 +173,20 @@ static void clear_pages_worker(struct work_struct *work)
obj->read_domains = I915_GEM_GPU_DOMAINS;
obj->write_domain = 0;
 
-   err = i915_vma_pin(vma, 0, 0, PIN_USER);
-   if (unlikely(err))
+   i915_gem_ww_ctx_init(&ww, false);
+   intel_engine_pm_get(w->ce->engine);
+retry:
+   err = intel_context_pin_ww(w->ce, &ww);
+   if (err)
goto out_signal;
 
-   batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
+   batch = intel_emit_vma_fill_blt(w->ce, vma, &ww, w->value);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
-   goto out_unpin;
+   goto out_ctx;
}
 
-   rq = intel_context_create_request(w->ce);
+   rq = i915_request_create(w->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
@@ -224,9 +228,19 @@ static void clear_pages_worker(struct work_struct *work)
i915_request_add(rq);
 out_batch:
intel_emit_vma_release(w->ce, batch);
-out_unpin:
-   i915_vma_unpin(vma);
+out_ctx:
+   intel_context_unpin(w->ce);
 out_signal:
+   if (err == -EDEADLK) {
+   err = i915_gem_ww_ctx_backoff(&ww);
+   if (!err)
+   goto retry;
+   }
+   i915_gem_ww_ctx_fini(&ww);
+
+   i915_vma_unpin(w->sleeve->vma);
+   intel_engine_pm_put(w->ce->engine);
+
if (unlikely(err)) {
dma_fence_set_error(&w->dma, err);
dma_fence_signal(&w->dma);
@@ -234,6 +248,45 @@ static void clear_pages_worker(struct work_struct *work)
}
 }
 
+static int pin_wait_clear_pages_work(struct clear_pages_work *w,
+struct intel_context *ce)
+{
+   struct i915_vma *vma = w->sleeve->vma;
+   struct i915_gem_ww_ctx ww;
+   int err;
+
+   i915_gem_ww_ctx_init(&ww, false);
+retry:
+   err = i915_gem_object_lock(vma->obj, &ww);
+   if (err)
+   goto out;
+
+   err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
+   if (unlikely(err))
+   goto out;
+
+   err = i915_sw_fence_await_reservation(&w->wait,
+ vma->obj->base.resv, NULL,
+ true, I915_FENCE_TIMEOUT,
+ I915_FENCE_GFP);
+   if (err)
+   goto err_unpin_vma;
+
+   dma_resv_add_excl_fence(vma->obj->base.resv, &w->dma);
+
+err_unpin_vma:
+   if (err)
+   i915_vma_unpin(vma);
+out:
+   if (err == -EDEADLK) {
+   err = i915_gem_ww_ctx_backoff(&ww);
+   if (!err)
+   goto retry;
+   }
+   i915_gem_ww_ctx_fini(&ww);
+   return err;
+}
+
 static int __i915_sw_fence_call
 clear_pages_work_notify(struct i915_sw_fence *fence,
enum i915_sw_fence_notify state)
@@ -287,18 +340,9 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
i915_sw_fence_init(&work->wait, clear_pages_work_notify);
 
-   i915_gem_object_lock(obj, NULL);
-   err = i915_sw_fence_await_reservation(&work->wait,
- obj->base.resv, NULL,
- true, I915_FENCE_TIMEOUT,
- I915_FENCE_GFP);
-   if (err < 0) {
+   err = pin_wait_clear_pages_work(work, ce);
+   if (err < 0)
dma_fence_set_error(&work->dma, err);
-   } else {
-   dma_resv_add_excl_fence(obj->base.resv, &work->dma);
-   err = 0;
-   }
-   i915_gem_object_unlock(obj);
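
For context, the conversion above boils down to the ww acquire/backoff loop used throughout this series. The following is a minimal sketch of that pattern, using only the helpers already visible in the diff (i915_gem_ww_ctx_init/backoff/fini and the ww form of i915_gem_object_lock); the function name and the work done under the lock are illustrative placeholders, not part of the patch:

/*
 * Minimal sketch of the ww locking pattern this patch applies.
 * Helper names are taken from the diff above; "obj" and the work
 * performed under the lock are placeholders.
 */
static int example_ww_locked_op(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false); /* bool intr = false, as in the worker */
retry:
	err = i915_gem_object_lock(obj, &ww); /* takes obj->base.resv, tracked by ww */
	if (err)
		goto out;

	/* ... pin vmas, emit the blit, create and add the request ... */

out:
	if (err == -EDEADLK) {
		/* Contention: drop all locks held by this ww ctx and retry. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

The point of the loop is that every reservation lock taken under the ww context is tracked, so a contended acquisition returns -EDEADLK instead of deadlocking, and the whole sequence can back off and retry in a consistent order; that is what allows the timeline->mutex vs resv_lock ordering to be fixed here.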

[Intel-gfx] [PATCH 14/23] drm/i915: Convert i915_gem_object/client_blt.c to use ww locking as well, v2.

2020-05-20 Thread Maarten Lankhorst
This is the last part outside of selftests that still doesn't use the
correct lock ordering of timeline->mutex vs resv_lock.

With gem fixed, there are a few places that still get locking wrong:
- gvt/scheduler.c
- i915_perf.c
- Most if not all selftests.

Changes since v1:
- Add intel_engine_pm_get/put() calls to fix use-after-free when using
  intel_engine_get_pool().

Signed-off-by: Maarten Lankhorst 
---
 .../gpu/drm/i915/gem/i915_gem_client_blt.c|  78 +++--
 .../gpu/drm/i915/gem/i915_gem_object_blt.c| 157 +++---
 .../gpu/drm/i915/gem/i915_gem_object_blt.h|   3 +
 3 files changed, 164 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index c182091c00ff..c141d7ce8a75 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -156,6 +156,7 @@ static void clear_pages_worker(struct work_struct *work)
struct clear_pages_work *w = container_of(work, typeof(*w), work);
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
+   struct i915_gem_ww_ctx ww;
struct i915_request *rq;
struct i915_vma *batch;
int err = w->dma.error;
@@ -171,17 +172,20 @@ static void clear_pages_worker(struct work_struct *work)
obj->read_domains = I915_GEM_GPU_DOMAINS;
obj->write_domain = 0;
 
-   err = i915_vma_pin(vma, 0, 0, PIN_USER);
-   if (unlikely(err))
+   i915_gem_ww_ctx_init(&ww, false);
+   intel_engine_pm_get(w->ce->engine);
+retry:
+   err = intel_context_pin_ww(w->ce, &ww);
+   if (err)
goto out_signal;
 
-   batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
+   batch = intel_emit_vma_fill_blt(w->ce, vma, &ww, w->value);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
-   goto out_unpin;
+   goto out_ctx;
}
 
-   rq = intel_context_create_request(w->ce);
+   rq = i915_request_create(w->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
@@ -223,9 +227,19 @@ static void clear_pages_worker(struct work_struct *work)
i915_request_add(rq);
 out_batch:
intel_emit_vma_release(w->ce, batch);
-out_unpin:
-   i915_vma_unpin(vma);
+out_ctx:
+   intel_context_unpin(w->ce);
 out_signal:
+   if (err == -EDEADLK) {
+   err = i915_gem_ww_ctx_backoff(&ww);
+   if (!err)
+   goto retry;
+   }
+   i915_gem_ww_ctx_fini(&ww);
+
+   i915_vma_unpin(w->sleeve->vma);
+   intel_engine_pm_put(w->ce->engine);
+
if (unlikely(err)) {
dma_fence_set_error(&w->dma, err);
dma_fence_signal(&w->dma);
@@ -233,6 +247,44 @@ static void clear_pages_worker(struct work_struct *work)
}
 }
 
+static int pin_wait_clear_pages_work(struct clear_pages_work *w,
+struct intel_context *ce)
+{
+   struct i915_vma *vma = w->sleeve->vma;
+   struct i915_gem_ww_ctx ww;
+   int err;
+
+   i915_gem_ww_ctx_init(&ww, false);
+retry:
+   err = i915_gem_object_lock(vma->obj, &ww);
+   if (err)
+   goto out;
+
+   err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
+   if (unlikely(err))
+   goto out;
+
+   err = i915_sw_fence_await_reservation(&w->wait,
+ vma->obj->base.resv, NULL,
+ true, 0, I915_FENCE_GFP);
+   if (err)
+   goto err_unpin_vma;
+
+   dma_resv_add_excl_fence(vma->obj->base.resv, &w->dma);
+
+err_unpin_vma:
+   if (err)
+   i915_vma_unpin(vma);
+out:
+   if (err == -EDEADLK) {
+   err = i915_gem_ww_ctx_backoff(&ww);
+   if (!err)
+   goto retry;
+   }
+   i915_gem_ww_ctx_fini(&ww);
+   return err;
+}
+
 static int __i915_sw_fence_call
 clear_pages_work_notify(struct i915_sw_fence *fence,
enum i915_sw_fence_notify state)
@@ -286,17 +338,9 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
i915_sw_fence_init(&work->wait, clear_pages_work_notify);
 
-   i915_gem_object_lock(obj, NULL);
-   err = i915_sw_fence_await_reservation(&work->wait,
- obj->base.resv, NULL, true, 0,
- I915_FENCE_GFP);
-   if (err < 0) {
+   err = pin_wait_clear_pages_work(work, ce);
+   if (err < 0)
dma_fence_set_error(&work->dma, err);
-   } else {
-   dma_resv_add_excl_fence(obj->base.resv, &work->dma);
-   err = 0;
-   }
-   i915_gem_object_unlock(obj);
 
dma_f
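
The v2 change noted in the changelog above (intel_engine_pm_get/put) exists because anything borrowed from the engine, such as the pool buffer returned by intel_engine_get_pool(), is only guaranteed to stay alive while the engine holds a wakeref; the likely failure mode is the engine parking and releasing pool memory while the worker is still using it. A rough sketch of the bracketing, where do_clear_blt() is a hypothetical stand-in for the context pin, batch emission and request submission shown in the diff:

/*
 * Sketch of the engine-PM bracketing added in v2. The wakeref must
 * cover the whole operation; do_clear_blt() is a placeholder for the
 * pin/emit/submit steps above, not a real driver function.
 */
static int example_clear_with_engine_pm(struct intel_context *ce)
{
	int err;

	intel_engine_pm_get(ce->engine);	/* keep the engine (and its pool) alive */
	err = do_clear_blt(ce);			/* hypothetical helper */
	intel_engine_pm_put(ce->engine);	/* drop the wakeref once nothing is borrowed */

	return err;
}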