From: Tvrtko Ursulin
We can use the new pin/lazy unpin API for simplicity
and more performance in the execlist submission paths.
v2:
* Fix error handling and convert more users.
* Compact some names for readability.
v3:
* intel_lr_context_free was not unpinning.
* Special case for GPU reset which otherwise unbalances
the HWS object pages pin count by running the engine
initialization only (not destructors).
v4:
* Rebased on top of hws setup/init split.
Signed-off-by: Tvrtko Ursulin
Cc: Chris Wilson
---
drivers/gpu/drm/i915/i915_gem_context.c | 2 +-
drivers/gpu/drm/i915/intel_lrc.c | 82 ++---
drivers/gpu/drm/i915/intel_lrc.h |  7 ++-
3 files changed, 52 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index fe580cb9501a..91028d9c6269 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -342,7 +342,7 @@ void i915_gem_context_reset(struct drm_device *dev)
struct intel_context *ctx;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- intel_lr_context_reset(dev, ctx);
+ intel_lr_context_reset(dev_priv, ctx);
}
for (i = 0; i < I915_NUM_ENGINES; i++) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3fd2ae6ce8e7..b61f8da5d6f3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1091,8 +1091,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
- struct page *lrc_state_page;
- uint32_t *lrc_reg_state;
+ void *obj_addr;
+ u32 *lrc_reg_state;
int ret;
WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
@@ -1102,19 +1102,20 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
if (ret)
return ret;
- lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- if (WARN_ON(!lrc_state_page)) {
- ret = -ENODEV;
+ obj_addr = i915_gem_object_pin_map(ctx_obj);
+ if (IS_ERR(obj_addr)) {
+ ret = PTR_ERR(obj_addr);
goto unpin_ctx_obj;
}
+ lrc_reg_state = obj_addr + LRC_STATE_PN * PAGE_SIZE;
+
ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
if (ret)
- goto unpin_ctx_obj;
+ goto unpin_map;
ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
intel_lr_context_descriptor_update(ctx, engine);
- lrc_reg_state = kmap(lrc_state_page);
lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
ctx_obj->dirty = true;
@@ -1125,6 +1126,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
return ret;
+unpin_map:
+ i915_gem_object_unpin_map(ctx_obj);
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
@@ -1157,7 +1160,7 @@ void intel_lr_context_unpin(struct intel_context *ctx,
WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
if (--ctx->engine[engine->id].pin_count == 0) {
- kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+ i915_gem_object_unpin_map(ctx_obj);
intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
ctx->engine[engine->id].lrc_vma = NULL;
@@ -2054,7 +2057,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
i915_gem_batch_pool_fini(&engine->batch_pool);
if (engine->status_page.obj) {
- kunmap(sg_page(engine->status_page.obj->pages->sgl));
+ i915_gem_object_unpin_map(engine->status_page.obj);
engine->status_page.obj = NULL;
}
@@ -2092,18 +2095,22 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
-static void
+static int
lrc_setup_hws(struct intel_engine_cs *engine,
struct drm_i915_gem_object *dctx_obj)
{
- struct page *page;
+ void *hws;
/* The HWSP is part of the default context object in LRC mode. */
engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
LRC_PPHWSP_PN * PAGE_SIZE;
- page = i915_gem_object_get_page(dctx_obj, LRC_PPHWSP_PN);
- engine->status_page.page_addr = kmap(page);
+ hws =