Extend init/init_hw split to context init.
   - Move context initialisation into i915_gem_init_hw
   - Move one-off initialisation for the render ring to
        i915_gem_validate_context
   - Move default context initialisation to logical_ring_init

Rename intel_lr_context_deferred_create to
intel_lr_context_deferred_alloc to reflect its reduced functionality and
the alloc/init split.

This patch splits out the allocation of resources from their
initialisation, so that the initialisation code can more easily be reused
for resume and GPU reset, as sketched below.
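
A rough sketch of the resulting split, as reflected in the diff below
(simplified; error handling and intermediate callers are omitted, and
gen8_init_common_ring() is assumed to be reached through the per-ring
->init_hw() hook):

    One-time allocation (driver load / first use of a context):
        logical_ring_init()
            intel_lr_context_deferred_alloc(ring->default_context, ring)
            pin default context state and ringbuffer
        i915_gem_validate_context()
            intel_lr_context_deferred_alloc(ctx, ring)
                ring->init_context(req)        /* non-default contexts */

    Re-runnable HW init (driver load / resume / GPU reset):
        i915_gem_init_hw()
            gen8_init_common_ring()
                lrc_setup_hardware_status_page()
            i915_gem_set_seqno(dev, dev_priv->next_seqno)
            default context initialisation for each ring via a request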

v2: Removed function ptr wrapping of do_switch_context (Daniel Vetter)
    Left ->init_context in intel_lr_context_deferred_alloc (Daniel Vetter)
    Remove unnecessary init flag & ring type test. (Daniel Vetter)
    Improve commit message (Daniel Vetter)
v3: On init/reinit, set the HW next sequence number to the SW next sequence
    number (which is set to 1 at driver load time). This prevents the seqno
    from being reset on reinit (Chris Wilson)

Issue: VIZ-4798
Signed-off-by: Nick Hoath <nicholas.ho...@intel.com>
Cc: Daniel Vetter <daniel.vet...@ffwll.ch>
Cc: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            |   1 -
 drivers/gpu/drm/i915/i915_gem.c            |  18 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   3 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 147 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_lrc.h           |   4 +-
 5 files changed, 86 insertions(+), 87 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f7fd519..844ccf0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -880,7 +880,6 @@ struct intel_context {
        } legacy_hw_ctx;
 
        /* Execlists */
-       bool rcs_initialized;
        struct {
                struct drm_i915_gem_object *state;
                struct intel_ringbuffer *ringbuf;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 73293b4..eb7c1f2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4603,14 +4603,8 @@ int i915_gem_init_rings(struct drm_device *dev)
                        goto cleanup_vebox_ring;
        }
 
-       ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
-       if (ret)
-               goto cleanup_bsd2_ring;
-
        return 0;
 
-cleanup_bsd2_ring:
-       intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
 cleanup_vebox_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
@@ -4629,6 +4623,7 @@ i915_gem_init_hw(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        int ret, i, j;
+       struct drm_i915_gem_request *req;
 
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;
@@ -4680,9 +4675,12 @@ i915_gem_init_hw(struct drm_device *dev)
                        goto out;
        }
 
+       ret = i915_gem_set_seqno(dev, dev_priv->next_seqno);
+       if (ret)
+               goto out;
+
        /* Now it is safe to go back round and do everything else: */
        for_each_ring(ring, dev_priv, i) {
-               struct drm_i915_gem_request *req;
 
                WARN_ON(!ring->default_context);
 
@@ -4881,6 +4879,12 @@ i915_gem_load(struct drm_device *dev)
                dev_priv->num_fence_regs =
                                I915_READ(vgtif_reg(avail_rs.fence_num));
 
+       /*
+        * Set initial sequence number for requests.
+        */
+       dev_priv->next_seqno = 1;
+       dev_priv->last_seqno = ~((uint32_t)0);
+
        /* Initialize fence registers to zero */
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        i915_gem_restore_fences(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 923a3c4..95f1a0d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -994,6 +994,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 {
        struct intel_context *ctx = NULL;
        struct i915_ctx_hang_stats *hs;
+       int ret;
 
        if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
                return ERR_PTR(-EINVAL);
@@ -1009,7 +1010,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
        }
 
        if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-               int ret = intel_lr_context_deferred_create(ctx, ring);
+               ret = intel_lr_context_deferred_alloc(ctx, ring);
                if (ret) {
                        DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
                        return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 138964a..d0dc6b5 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1426,11 +1426,31 @@ out:
        return ret;
 }
 
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+               struct drm_i915_gem_object *default_ctx_obj)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+       /* The status page is offset 0 from the default context object
+        * in LRC mode. */
+       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
+       ring->status_page.page_addr =
+                       kmap(sg_page(default_ctx_obj->pages->sgl));
+       ring->status_page.obj = default_ctx_obj;
+
+       I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+                       (u32)ring->status_page.gfx_addr);
+       POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+}
+
 static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       lrc_setup_hardware_status_page(ring,
+                               ring->default_context->engine[ring->id].state);
+
        I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
        I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
@@ -1841,8 +1861,33 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
        if (ret)
                return ret;
 
-       ret = intel_lr_context_deferred_create(ring->default_context, ring);
+       ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+       if (ret)
+               return ret;
 
+       ret = i915_gem_obj_ggtt_pin(
+               ring->default_context->engine[ring->id].state,
+               GEN8_LR_CONTEXT_ALIGN, 0);
+       if (ret) {
+               DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
+                               ret);
+               return ret;
+       }
+
+       ret = intel_pin_and_map_ringbuffer_obj(dev,
+               ring->default_context->engine[ring->id].ringbuf);
+       if (ret) {
+               DRM_ERROR(
+                       "Failed to pin and map ringbuffer %s: %d\n",
+                       ring->name, ret);
+               goto error_unpin_ggtt;
+       }
+
+       return ret;
+
+error_unpin_ggtt:
+       i915_gem_object_ggtt_unpin(
+               ring->default_context->engine[ring->id].state);
        return ret;
 }
 
@@ -2044,14 +2089,8 @@ int intel_logical_rings_init(struct drm_device *dev)
                        goto cleanup_vebox_ring;
        }
 
-       ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
-       if (ret)
-               goto cleanup_bsd2_ring;
-
        return 0;
 
-cleanup_bsd2_ring:
-       intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
 cleanup_vebox_ring:
        intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
@@ -2303,25 +2342,8 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
        return ret;
 }
 
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-               struct drm_i915_gem_object *default_ctx_obj)
-{
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-       /* The status page is offset 0 from the default context object
-        * in LRC mode. */
-       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
-       ring->status_page.page_addr =
-                       kmap(sg_page(default_ctx_obj->pages->sgl));
-       ring->status_page.obj = default_ctx_obj;
-
-       I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-                       (u32)ring->status_page.gfx_addr);
-       POSTING_READ(RING_HWS_PGA(ring->mmio_base));
-}
-
 /**
- * intel_lr_context_deferred_create() - create the LRC specific bits of a context
+ * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
  * @ring: engine to be used with the context.
  *
@@ -2333,10 +2355,10 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
  *
  * Return: non-zero on error.
  */
-int intel_lr_context_deferred_create(struct intel_context *ctx,
+
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                                     struct intel_engine_cs *ring)
 {
-       const bool is_global_default_ctx = (ctx == ring->default_context);
        struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *ctx_obj;
        uint32_t context_size;
@@ -2354,22 +2376,12 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
                return -ENOMEM;
        }
 
-       if (is_global_default_ctx) {
-               ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
-               if (ret) {
-                       DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
-                                       ret);
-                       drm_gem_object_unreference(&ctx_obj->base);
-                       return ret;
-               }
-       }
-
        ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
        if (!ringbuf) {
                DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
                                ring->name);
                ret = -ENOMEM;
-               goto error_unpin_ctx;
+               goto error_deref_obj;
        }
 
        ringbuf->ring = ring;
@@ -2389,66 +2401,48 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
                                ring->name, ret);
                        goto error_free_rbuf;
                }
-
-               if (is_global_default_ctx) {
-                       ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
-                       if (ret) {
-                               DRM_ERROR(
-                                       "Failed to pin and map ringbuffer %s: %d\n",
-                                       ring->name, ret);
-                               goto error_destroy_rbuf;
-                       }
-               }
-
        }
 
        ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-               goto error;
+               goto error_destroy_rbuf;
        }
 
        ctx->engine[ring->id].ringbuf = ringbuf;
        ctx->engine[ring->id].state = ctx_obj;
 
-       if (ctx == ring->default_context)
-               lrc_setup_hardware_status_page(ring, ctx_obj);
-       else if (ring->id == RCS && !ctx->rcs_initialized) {
-               if (ring->init_context) {
-                       struct drm_i915_gem_request *req;
+       if (ctx != ring->default_context && ring->init_context) {
+               struct drm_i915_gem_request *req;
 
-                       ret = i915_gem_request_alloc(ring, ctx, &req);
-                       if (ret)
-                               return ret;
-
-                       ret = ring->init_context(req);
-                       if (ret) {
-                               DRM_ERROR("ring init context: %d\n", ret);
-                               i915_gem_request_cancel(req);
-                               ctx->engine[ring->id].ringbuf = NULL;
-                               ctx->engine[ring->id].state = NULL;
-                               goto error;
-                       }
-
-                       i915_add_request_no_flush(req);
+               ret = i915_gem_request_alloc(ring,
+                       ring->default_context, &req);
+               if (ret) {
+                       DRM_ERROR("ring create req: %d\n",
+                                 ret);
+                       goto error_destroy_rbuf;
                }
 
-               ctx->rcs_initialized = true;
+               ret = ring->init_context(req);
+               if (ret) {
+                       DRM_ERROR("ring init context: %d\n",
+                                 ret);
+                       i915_gem_request_cancel(req);
+                       goto error_destroy_rbuf;
+               }
+               i915_add_request_no_flush(req);
        }
 
        return 0;
 
-error:
-       if (is_global_default_ctx)
-               intel_unpin_ringbuffer_obj(ringbuf);
 error_destroy_rbuf:
        intel_destroy_ringbuffer_obj(ringbuf);
 error_free_rbuf:
        kfree(ringbuf);
-error_unpin_ctx:
-       if (is_global_default_ctx)
-               i915_gem_object_ggtt_unpin(ctx_obj);
+error_deref_obj:
        drm_gem_object_unreference(&ctx_obj->base);
+       ctx->engine[ring->id].ringbuf = NULL;
+       ctx->engine[ring->id].state = NULL;
        return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 64f89f99..109147e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -69,8 +69,8 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 
 /* Logical Ring Contexts */
 void intel_lr_context_free(struct intel_context *ctx);
-int intel_lr_context_deferred_create(struct intel_context *ctx,
-                                    struct intel_engine_cs *ring);
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
+                                   struct intel_engine_cs *ring);
 void intel_lr_context_unpin(struct drm_i915_gem_request *req);
 void intel_lr_context_reset(struct drm_device *dev,
                        struct intel_context *ctx);
-- 
2.1.1
