[Intel-gfx] [PATCH 16/23] drm/i915: Convert i915_perf to ww locking as well

2020-05-20 Thread Maarten Lankhorst
We have the lock ordering of timeline->mutex vs resv_lock wrong;
convert the i915_vma_pin and intel_context_pin call sites here to
ww locking as well to future-proof this.

We may need further changes to make this more transaction-like,
and get down to a single i915_gem_ww_ctx, but for now this
should work.

Signed-off-by: Maarten Lankhorst 
---
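For reference, a minimal sketch of the acquire/backoff pattern this patch
applies, using only the i915_gem_ww_ctx_* and *_ww helpers that appear in
the diff below; the wrapper function pin_oa_vma_ww() is purely illustrative
and not part of the patch:

static int pin_oa_vma_ww(struct i915_vma *vma)
{
	struct i915_gem_ww_ctx ww;
	int err;

	/* true = interruptible waits while acquiring reservation locks */
	i915_gem_ww_ctx_init(&ww, true);
retry:
	/* Lock the backing object under the ww context... */
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err)
		goto out;

	/* ...then pin with the same ww context so any locks taken
	 * inside the pin join the same transaction. */
	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
out:
	if (err == -EDEADLK) {
		/* We lost the wound/wait ordering: drop every lock held
		 * by this ww context, wait for the contended lock, and
		 * restart the whole transaction. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}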
 drivers/gpu/drm/i915/i915_perf.c | 57 +++-
 1 file changed, 42 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index f35712d04ba4..056e53914760 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1195,24 +1195,39 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
struct i915_gem_engines_iter it;
struct i915_gem_context *ctx = stream->ctx;
struct intel_context *ce;
-   int err;
+   struct i915_gem_ww_ctx ww;
+   int err = -ENODEV;
 
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
if (ce->engine != stream->engine) /* first match! */
continue;
 
-   /*
-* As the ID is the gtt offset of the context's vma we
-* pin the vma to ensure the ID remains fixed.
-*/
-   err = intel_context_pin(ce);
-   if (err == 0) {
-   stream->pinned_ctx = ce;
-   break;
-   }
+   err = 0;
+   break;
}
i915_gem_context_unlock_engines(ctx);
 
+   if (err)
+   return ERR_PTR(err);
+
+   i915_gem_ww_ctx_init(&ww, true);
+retry:
+   /*
+* As the ID is the gtt offset of the context's vma we
+* pin the vma to ensure the ID remains fixed.
+*/
+   err = intel_context_pin_ww(ce, &ww);
+   if (err == -EDEADLK) {
+   err = i915_gem_ww_ctx_backoff(&ww);
+   if (!err)
+   goto retry;
+   }
+   i915_gem_ww_ctx_fini(&ww);
+
+   if (err)
+   return ERR_PTR(err);
+
+   stream->pinned_ctx = ce;
return stream->pinned_ctx;
 }
 
@@ -1922,15 +1937,22 @@ emit_oa_config(struct i915_perf_stream *stream,
 {
struct i915_request *rq;
struct i915_vma *vma;
+   struct i915_gem_ww_ctx ww;
int err;
 
vma = get_oa_vma(stream, oa_config);
if (IS_ERR(vma))
return PTR_ERR(vma);
 
-   err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+   i915_gem_ww_ctx_init(&ww, true);
+retry:
+   err = i915_gem_object_lock(vma->obj, &ww);
+   if (err)
+   goto err;
+
+   err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
-   goto err_vma_put;
+   goto err;
 
intel_engine_pm_get(ce->engine);
rq = i915_request_create(ce);
@@ -1952,11 +1974,9 @@ emit_oa_config(struct i915_perf_stream *stream,
goto err_add_request;
}
 
-   i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, 0);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
-   i915_vma_unlock(vma);
if (err)
goto err_add_request;
 
@@ -1970,7 +1990,14 @@ emit_oa_config(struct i915_perf_stream *stream,
i915_request_add(rq);
 err_vma_unpin:
i915_vma_unpin(vma);
-err_vma_put:
+err:
+   if (err == -EDEADLK) {
+   err = i915_gem_ww_ctx_backoff(&ww);
+   if (!err)
+   goto retry;
+   }
+
+   i915_gem_ww_ctx_fini(&ww);
i915_vma_put(vma);
return err;
 }
-- 
2.26.2
