On Fri, Jun 13, 2014 at 08:37:29AM -0700, oscar.ma...@intel.com wrote:
> From: Oscar Mateo <oscar.ma...@intel.com>
> 
> As we have said a couple of times by now, logical ring contexts have
> their own ringbuffers: not only the backing pages, but the whole
> management struct.
> 
> In a previous version of the series, this was achieved with two separate
> patches:
> drm/i915/bdw: Allocate ringbuffer backing objects for default global LRC
> drm/i915/bdw: Allocate ringbuffer for user-created LRCs
> 
> Signed-off-by: Oscar Mateo <oscar.ma...@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_drv.h  |  1 +
>  drivers/gpu/drm/i915/intel_lrc.c | 38 ++++++++++++++++++++++++++++++++++++++
>  2 files changed, 39 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 347308e..79799d8 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -599,6 +599,7 @@ struct intel_context {
>       /* Execlists */
>       struct {
>               struct drm_i915_gem_object *obj;
> +             struct intel_ringbuffer *ringbuf;
>       } engine[I915_NUM_RINGS];
>  
>       struct i915_ctx_hang_stats hang_stats;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 952212f..b3a23e0 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -60,7 +60,11 @@ void intel_lr_context_free(struct intel_context *ctx)
>  
>       for (i = 0; i < I915_NUM_RINGS; i++) {
>               struct drm_i915_gem_object *ctx_obj = ctx->engine[i].obj;
> +             struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
> +
>               if (ctx_obj) {
> +                     intel_destroy_ring_buffer(ringbuf);
> +                     kfree(ringbuf);
>                       i915_gem_object_ggtt_unpin(ctx_obj);
>                       drm_gem_object_unreference(&ctx_obj->base);
>               }
> @@ -94,6 +98,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
>       struct drm_device *dev = ring->dev;
>       struct drm_i915_gem_object *ctx_obj;
>       uint32_t context_size;
> +     struct intel_ringbuffer *ringbuf;
>       int ret;
>  
>       WARN_ON(ctx->render_obj != NULL);
> @@ -114,6 +119,39 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
>               return ret;
>       }
>  
> +     ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
> +     if (!ringbuf) {
> +             DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
> +                             ring->name);
> +             i915_gem_object_ggtt_unpin(ctx_obj);
> +             drm_gem_object_unreference(&ctx_obj->base);
> +             ret = -ENOMEM;
> +             return ret;
> +     }
> +
> +     ringbuf->size = 32 * PAGE_SIZE;
> +     ringbuf->effective_size = ringbuf->size;
> +     ringbuf->head = 0;
> +     ringbuf->tail = 0;
> +     ringbuf->space = ringbuf->size;
> +     ringbuf->last_retired_head = -1;
> +
> +     /* TODO: For now we put this in the mappable region so that we can reuse
> +      * the existing ringbuffer code which ioremaps it. When we start
> +      * creating many contexts, this will no longer work and we must switch
> +      * to a kmapish interface.
> +      */

It looks like this comment still exists at the end of the series. Does it
still apply, or did we find that this is not an issue?
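
If the ioremap limitation does turn out to matter, I was imagining something
along these lines as the "kmapish" alternative: map the backing pages through
the CPU page tables with vmap() instead of ioremapping the GGTT aperture, so
the object no longer has to sit in the mappable region. Very rough sketch
only -- the use of i915_gem_object_get_pages()/get_page() and vmap() here is
my assumption, and virtual_start is currently __iomem so that annotation
would have to change as well:

	/* Needs linux/vmalloc.h; sketch only, not tested. */
	struct drm_i915_gem_object *obj = ringbuf->obj;
	struct page **pages;
	int i, npages = ringbuf->size >> PAGE_SHIFT;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;
	i915_gem_object_pin_pages(obj);

	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		i915_gem_object_unpin_pages(obj);
		return -ENOMEM;
	}

	for (i = 0; i < npages; i++)
		pages[i] = i915_gem_object_get_page(obj, i);

	/* CPU mapping of the backing pages, no GGTT aperture needed */
	ringbuf->virtual_start = vmap(pages, npages, 0, PAGE_KERNEL);
	kfree(pages);
	if (!ringbuf->virtual_start) {
		i915_gem_object_unpin_pages(obj);
		return -ENOMEM;
	}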

Brad

> +     ret = intel_allocate_ring_buffer(dev, ringbuf);
> +     if (ret) {
> +             DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
> +                             ring->name, ret);
> +             kfree(ringbuf);
> +             i915_gem_object_ggtt_unpin(ctx_obj);
> +             drm_gem_object_unreference(&ctx_obj->base);
> +             return ret;
> +     }
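
One small nitpick on the error handling above, take it or leave it: the
unpin + unreference unwind for ctx_obj is now duplicated in both failure
paths. A goto-based cleanup would keep it in one place; roughly (same calls
as in the patch, just restructured):

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				ring->name);
		ret = -ENOMEM;
		goto err_unpin;
	}

	/* ... ringbuf field setup as above ... */

	ret = intel_allocate_ring_buffer(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				ring->name, ret);
		goto err_free_ringbuf;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].obj = ctx_obj;

	return 0;

err_free_ringbuf:
	kfree(ringbuf);
err_unpin:
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
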
> +
> +     ctx->engine[ring->id].ringbuf = ringbuf;
>       ctx->engine[ring->id].obj = ctx_obj;
>  
>       return 0;
> -- 
> 1.9.0
> 