On Tue, Jun 01, 2021 at 02:23:00PM -0700, Daniele Ceraolo Spurio wrote:
> 
> 
> On 6/1/2021 1:20 PM, Rodrigo Vivi wrote:
> > On Mon, May 24, 2021 at 10:47:50PM -0700, Daniele Ceraolo Spurio wrote:
> > > From: Chris Wilson <ch...@chris-wilson.co.uk>
> > > 
> > > Allow internal clients to create a pinned context.
> > > 
> > > v2 (Daniele): export destructor as well, allow optional usage of custom
> > > vm for maximum flexibility.
> > > 
> > > Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
> > > Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospu...@intel.com>
> > > ---
> > >   drivers/gpu/drm/i915/gt/intel_engine.h    | 10 ++++++++
> > >   drivers/gpu/drm/i915/gt/intel_engine_cs.c | 29 +++++++++++++++--------
> > >   2 files changed, 29 insertions(+), 10 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
> > > index 47ee8578e511..a64d28aba257 100644
> > > --- a/drivers/gpu/drm/i915/gt/intel_engine.h
> > > +++ b/drivers/gpu/drm/i915/gt/intel_engine.h
> > > @@ -18,7 +18,9 @@
> > >   #include "intel_workarounds.h"
> > >   struct drm_printer;
> > > +struct intel_context;
> > >   struct intel_gt;
> > > +struct lock_class_key;
> > >   /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
> > >    * but keeps the logic simple. Indeed, the whole purpose of this macro is just
> > > @@ -255,6 +257,14 @@ struct i915_request *
> > >   intel_engine_find_active_request(struct intel_engine_cs *engine);
> > >   u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
> > > +struct intel_context *
> > > +intel_engine_create_pinned_context(struct intel_engine_cs *engine,
> > > +                            struct i915_address_space *vm,
> > > +                            unsigned int ring_size,
> > > +                            unsigned int hwsp,
> > > +                            struct lock_class_key *key,
> > > +                            const char *name);
> > > +void intel_engine_destroy_pinned_context(struct intel_context *ce);
> > >   void intel_engine_init_active(struct intel_engine_cs *engine,
> > >                                 unsigned int subclass);
> > > diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> > > index eba2da9679a5..8cbf11497e8e 100644
> > > --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> > > +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> > > @@ -801,11 +801,13 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
> > >   #endif
> > >   }
> > > -static struct intel_context *
> > > -create_pinned_context(struct intel_engine_cs *engine,
> > > -               unsigned int hwsp,
> > > -               struct lock_class_key *key,
> > > -               const char *name)
> > > +struct intel_context *
> > > +intel_engine_create_pinned_context(struct intel_engine_cs *engine,
> > > +                            struct i915_address_space *vm,
> > > +                            unsigned int ring_size,
> > > +                            unsigned int hwsp,
> > > +                            struct lock_class_key *key,
> > > +                            const char *name)
> > >   {
> > >           struct intel_context *ce;
> > >           int err;
> > > @@ -816,6 +818,12 @@ create_pinned_context(struct intel_engine_cs *engine,
> > >           __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
> > >           ce->timeline = page_pack_bits(NULL, hwsp);
> > > + ce->ring = __intel_context_ring_size(ring_size);
> > why do we need this now when we didn't need it before?
> 
> Since we're now exporting the function as a more "official" interface, the
> idea was to provide as much flexibility as possible. The ring size could be
> used if, e.g., we decide to use more PXP sessions and therefore need more
> space in the ring to insert instructions. The same applies to the vm below.
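>
> As a minimal sketch (not part of the patch), an internal client could call
> the new interface roughly like this; the function name, the client_vm
> pointer, the SZ_16K ring size and the reuse of I915_GEM_HWS_SEQNO_ADDR are
> illustrative assumptions, not values used by any real caller:
>
>       /* hypothetical internal client, for illustration only */
>       static struct intel_context *
>       example_create_client_context(struct intel_engine_cs *engine,
>                                     struct i915_address_space *client_vm)
>       {
>               static struct lock_class_key key;
>
>               /* custom vm and a larger ring; hwsp slot reused purely as an example */
>               return intel_engine_create_pinned_context(engine, client_vm, SZ_16K,
>                                                         I915_GEM_HWS_SEQNO_ADDR,
>                                                         &key, "example_context");
>       }
>
> With a NULL vm the override below is simply skipped and the context keeps
> whatever intel_context_create() assigned, which is what create_kernel_context()
> relies on.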

It makes sense. Thanks for the explanation.


Reviewed-by: Rodrigo Vivi <rodrigo.v...@intel.com>



> 
> Daniele
> 
> > 
> > > +
> > > + if (vm) {
> > > +         i915_vm_put(ce->vm);
> > > +         ce->vm = i915_vm_get(vm);
> > > + }
> > same question here...
> > 
> > >           err = intel_context_pin(ce); /* perma-pin so it is always available */
> > >           if (err) {
> > > @@ -834,7 +842,7 @@ create_pinned_context(struct intel_engine_cs *engine,
> > >           return ce;
> > >   }
> > > -static void destroy_pinned_context(struct intel_context *ce)
> > > +void intel_engine_destroy_pinned_context(struct intel_context *ce)
> > >   {
> > >           struct intel_engine_cs *engine = ce->engine;
> > >           struct i915_vma *hwsp = engine->status_page.vma;
> > > @@ -854,8 +862,9 @@ create_kernel_context(struct intel_engine_cs *engine)
> > >   {
> > >           static struct lock_class_key kernel;
> > > - return create_pinned_context(engine, I915_GEM_HWS_SEQNO_ADDR,
> > > -                              &kernel, "kernel_context");
> > > + return intel_engine_create_pinned_context(engine, NULL, SZ_4K,
> > > +                                           I915_GEM_HWS_SEQNO_ADDR,
> > > +                                           &kernel, "kernel_context");
> > >   }
> > >   /**
> > > @@ -898,7 +907,7 @@ static int engine_init_common(struct intel_engine_cs *engine)
> > >           return 0;
> > >   err_context:
> > > - destroy_pinned_context(ce);
> > > + intel_engine_destroy_pinned_context(ce);
> > >           return ret;
> > >   }
> > > @@ -956,7 +965,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
> > >                   fput(engine->default_state);
> > >           if (engine->kernel_context)
> > > -         destroy_pinned_context(engine->kernel_context);
> > > +         intel_engine_destroy_pinned_context(engine->kernel_context);
> > >           GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
> > >           cleanup_status_page(engine);
> > > -- 
> > > 2.29.2
> > > 
> 
