[Intel-gfx] [PATCH] drm/i915: make context status notifier head be per engine

2017-03-21 Thread Chris Wilson
From: Changbin Du 

GVTg introduced the context status notifier to schedule GVTg
workloads. At that time the notifier was bound to the GVTg context
only, so GVTg is not aware of host workloads.

Now we are going to improve GVTg's guest workload scheduler policy
and add GuC emulation support for newer Gen graphics. Both features
require visibility into all contexts running on the hardware (but
will not alter host workloads), so make the following change.

The change is simple:
  1. Move the context status notifier head from i915_gem_context to
     intel_engine_cs, i.e. one notifier head per engine instead of
     per context. The execlists driver still calls the notifier for
     each context sched-in/out event on the current engine.
  2. On the GVTg side, bind a notifier_block to each physical engine
     at GVTg initialization time. GVTg can then hear all context
     status events.

In this patch GVTg does nothing for host context events, but a
handler will be added there later. In any case, the notifier callback
is a noop if there is no active vGPU.

Since intel_gvt_init() is called at an early initialization stage and
requires the status notifier head to already be initialized, initialize
it in intel_engine_setup(). A minimal sketch of the resulting wiring
follows.
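
The sketch below shows the engine-side wiring described above. It is
illustrative only, assuming the hook points named in this message; the
field and helper names follow the patch below, but the function body is
not a verbatim hunk:

    /* In intel_engine_setup(): one notifier head per engine. */
    ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

    /* In intel_lrc.c: fire the per-engine chain on every context
     * sched-in/out event; compiles away when GVT-g is not built in.
     */
    static inline void
    execlists_context_status_change(struct drm_i915_gem_request *rq,
                                    unsigned long status)
    {
            if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
                    return;

            atomic_notifier_call_chain(&rq->engine->context_status_notifier,
                                       status, rq);
    }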

v2: remove a redundant newline. (chris)

Fixes: 3c7ba6359d70 ("drm/i915: Introduce execlist context status change notification")
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=100232
Signed-off-by: Changbin Du 
Cc: Joonas Lahtinen 
Cc: Tvrtko Ursulin 
Cc: Zhi Wang 
Reviewed-by: Chris Wilson 
Link: http://patchwork.freedesktop.org/patch/msgid/20170313024711.28591-1-changbin...@intel.com
Acked-by: Zhenyu Wang 
Signed-off-by: Chris Wilson 
(cherry picked from commit 3fc03069bc6e6c316f19bb526e3c8ce784677477)
---
 drivers/gpu/drm/i915/gvt/gvt.h  |  2 +-
 drivers/gpu/drm/i915/gvt/scheduler.c| 45 ++---
 drivers/gpu/drm/i915/i915_gem_context.c |  1 -
 drivers/gpu/drm/i915/i915_gem_context.h |  3 ---
 drivers/gpu/drm/i915/intel_engine_cs.c  |  2 ++
 drivers/gpu/drm/i915/intel_lrc.c|  3 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.h |  3 +++
 7 files changed, 27 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 23791920ced1..6dfc48b63b71 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
-   struct notifier_block shadow_ctx_notifier_block;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
+   struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct intel_vgpu_type *types;
unsigned int num_types;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 39a83eb7aecc..c4353ed86d4b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
 {
-   struct intel_vgpu *vgpu = container_of(nb,
-   struct intel_vgpu, shadow_ctx_notifier_block);
-   struct drm_i915_gem_request *req =
-   (struct drm_i915_gem_request *)data;
-   struct intel_gvt_workload_scheduler *scheduler =
-   &vgpu->gvt->scheduler;
+   struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+   struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+   shadow_ctx_notifier_block[req->engine->id]);
+   struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
 
@@ -534,15 +532,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 {
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-   int i;
+   struct intel_engine_cs *engine;
+   enum intel_engine_id i;
 
gvt_dbg_core("clean workload scheduler\n");
 
-   for (i = 0; i < I915_NUM_ENGINES; i++) {
-   if (scheduler->thread[i]) {
-   kthread_stop(scheduler->thread[i]);
-   scheduler->thread[i] = NULL;
-   }
+   for_each_engine(engine, gvt->dev_priv, i) {
+   atomic_notifier_chain_unregister(
+   &engine->context_status_notifier,
+   &gvt->shadow_ctx_notifier_block[i]);
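
The unregister loop above has a mirror-image registration at GVTg
initialization. A hedged sketch follows, assuming it sits in
intel_gvt_init_workload_scheduler() next to the existing per-engine
thread creation (the exact placement is not visible in this message):

    /* Bind one notifier_block per physical engine so GVTg hears
     * every context status event on that engine, host or guest.
     */
    for_each_engine(engine, gvt->dev_priv, i) {
            gvt->shadow_ctx_notifier_block[i].notifier_call =
                                    shadow_context_status_change;
            atomic_notifier_chain_register(
                            &engine->context_status_notifier,
                            &gvt->shadow_ctx_notifier_block[i]);
    }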

Re: [Intel-gfx] [PATCH] drm/i915: make context status notifier head be per engine

2017-03-12 Thread Du, Changbin
Hi Chris,

On Fri, Mar 10, 2017 at 05:17:17PM +, Chris Wilson wrote:
> On Thu, Mar 09, 2017 at 07:27:24PM +0800, changbin...@intel.com wrote:
> > From: Changbin Du 
> > 
> > GVTg has introduced the context status notifier to schedule the GVTg
> > workload. At that time, the notifier is bound to GVTg context only,
> > so GVTg is not aware of host workloads.
> > 
> > Now we are going to improve GVTg's guest workload scheduler policy,
> > and add Guc emulation support for new Gen graphics. Both these two
> > features require acknowledgment for all contexts running on hardware.
> > (But will not alter host workload.) So here try to make some change.
> > 
> > The change is simple:
> >   1. Move the context status notifier head from i915_gem_context to
> >  intel_engine_cs. Which means there is a notifier head per engine
> >  instead of per context. Execlist driver still call notifier for
> >  each context sched-in/out events of current engine.
> >   2. At GVTg side, it binds a notifier_block for each physical engine
> >  at GVTg initialization period. Then GVTg can hear all context
> >  status events.
> > 
> > In this patch, GVTg do nothing for host context event, but later
> > will add a function there. But in any case, the notifier callback is
> > a noop if this is no active vGPU.
> > 
> > Since intel_gvt_init() is called at early initialization stage and
> > require the status notifier head has been initiated, I initiate it in
> > intel_engine_setup().
> > 
> > Signed-off-by: Changbin Du 
> Reviewed-by: Chris Wilson 
> 
> I presume you will apply this via gvt?
>
Sure, I'll sync with Zhenyu about this, then update the patch with
the 'bonus newline' below fixed. Thanks.

> > diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> > index d3a56c9..64875ec 100644
> > --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> > +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> > @@ -127,15 +127,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
> > return 0;
> >  }
> >  
> > +
> 
> Bonus newline
> 
> >  static int shadow_context_status_change(struct notifier_block *nb,
> > unsigned long action, void *data)
> >  {
> > -   struct intel_vgpu *vgpu = container_of(nb,
> > -   struct intel_vgpu, shadow_ctx_notifier_block);
> > -   struct drm_i915_gem_request *req =
> > -   (struct drm_i915_gem_request *)data;
> > -   struct intel_gvt_workload_scheduler *scheduler =
> > -   &vgpu->gvt->scheduler;
> > +   struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
> > +   struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
> > +   shadow_ctx_notifier_block[req->engine->id]);
> > +   struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> > struct intel_vgpu_workload *workload =
> > scheduler->current_workload[req->engine->id];
> >  
> 
> -- 
> Chris Wilson, Intel Open Source Technology Centre

-- 
Thanks,
Changbin Du




Re: [Intel-gfx] [PATCH] drm/i915: make context status notifier head be per engine

2017-03-10 Thread Chris Wilson
On Thu, Mar 09, 2017 at 07:27:24PM +0800, changbin...@intel.com wrote:
> From: Changbin Du 
> 
> GVTg has introduced the context status notifier to schedule the GVTg
> workload. At that time, the notifier is bound to GVTg context only,
> so GVTg is not aware of host workloads.
> 
> Now we are going to improve GVTg's guest workload scheduler policy,
> and add Guc emulation support for new Gen graphics. Both these two
> features require acknowledgment for all contexts running on hardware.
> (But will not alter host workload.) So here try to make some change.
> 
> The change is simple:
>   1. Move the context status notifier head from i915_gem_context to
>  intel_engine_cs. Which means there is a notifier head per engine
>  instead of per context. Execlist driver still call notifier for
>  each context sched-in/out events of current engine.
>   2. At GVTg side, it binds a notifier_block for each physical engine
>  at GVTg initialization period. Then GVTg can hear all context
>  status events.
> 
> In this patch, GVTg do nothing for host context event, but later
> will add a function there. But in any case, the notifier callback is
> a noop if this is no active vGPU.
> 
> Since intel_gvt_init() is called at early initialization stage and
> require the status notifier head has been initiated, I initiate it in
> intel_engine_setup().
> 
> Signed-off-by: Changbin Du 
Reviewed-by: Chris Wilson 

I presume you will apply this via gvt?

> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index d3a56c9..64875ec 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -127,15 +127,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
>   return 0;
>  }
>  
> +

Bonus newline

>  static int shadow_context_status_change(struct notifier_block *nb,
>   unsigned long action, void *data)
>  {
> - struct intel_vgpu *vgpu = container_of(nb,
> - struct intel_vgpu, shadow_ctx_notifier_block);
> - struct drm_i915_gem_request *req =
> - (struct drm_i915_gem_request *)data;
> - struct intel_gvt_workload_scheduler *scheduler =
> - &vgpu->gvt->scheduler;
> + struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
> + struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
> + shadow_ctx_notifier_block[req->engine->id]);
> + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
>   struct intel_vgpu_workload *workload =
>   scheduler->current_workload[req->engine->id];
>  

-- 
Chris Wilson, Intel Open Source Technology Centre


[Intel-gfx] [PATCH] drm/i915: make context status notifier head be per engine

2017-03-09 Thread changbin.du
From: Changbin Du 

GVTg introduced the context status notifier to schedule GVTg
workloads. At that time the notifier was bound to the GVTg context
only, so GVTg is not aware of host workloads.

Now we are going to improve GVTg's guest workload scheduler policy
and add GuC emulation support for newer Gen graphics. Both features
require visibility into all contexts running on the hardware (but
will not alter host workloads), so make the following change.

The change is simple:
  1. Move the context status notifier head from i915_gem_context to
     intel_engine_cs, i.e. one notifier head per engine instead of
     per context. The execlists driver still calls the notifier for
     each context sched-in/out event on the current engine.
  2. On the GVTg side, bind a notifier_block to each physical engine
     at GVTg initialization time. GVTg can then hear all context
     status events.

In this patch GVTg does nothing for host context events, but a
handler will be added there later. In any case, the notifier callback
is a noop if there is no active vGPU.

Since intel_gvt_init() is called at an early initialization stage and
requires the status notifier head to already be initialized, initialize
it in intel_engine_setup().

Signed-off-by: Changbin Du 
---
 drivers/gpu/drm/i915/gvt/gvt.h  |  2 +-
 drivers/gpu/drm/i915/gvt/scheduler.c| 46 ++---
 drivers/gpu/drm/i915/i915_gem_context.c |  1 -
 drivers/gpu/drm/i915/i915_gem_context.h |  3 ---
 drivers/gpu/drm/i915/intel_engine_cs.c  |  2 ++
 drivers/gpu/drm/i915/intel_lrc.c|  3 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.h |  3 +++
 7 files changed, 28 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2379192..6dfc48b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
-   struct notifier_block shadow_ctx_notifier_block;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
+   struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct intel_vgpu_type *types;
unsigned int num_types;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d3a56c9..64875ec 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -127,15 +127,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return 0;
 }
 
+
 static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
 {
-   struct intel_vgpu *vgpu = container_of(nb,
-   struct intel_vgpu, shadow_ctx_notifier_block);
-   struct drm_i915_gem_request *req =
-   (struct drm_i915_gem_request *)data;
-   struct intel_gvt_workload_scheduler *scheduler =
-   &vgpu->gvt->scheduler;
+   struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+   struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+   shadow_ctx_notifier_block[req->engine->id]);
+   struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
 
@@ -513,15 +512,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 {
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-   int i;
+   struct intel_engine_cs *engine;
+   enum intel_engine_id i;
 
gvt_dbg_core("clean workload scheduler\n");
 
-   for (i = 0; i < I915_NUM_ENGINES; i++) {
-   if (scheduler->thread[i]) {
-   kthread_stop(scheduler->thread[i]);
-   scheduler->thread[i] = NULL;
-   }
+   for_each_engine(engine, gvt->dev_priv, i) {
+   atomic_notifier_chain_unregister(
+   &engine->context_status_notifier,
+   &gvt->shadow_ctx_notifier_block[i]);
+   kthread_stop(scheduler->thread[i]);
}
 }
 
@@ -529,18 +529,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct workload_thread_param *param = NULL;
+   struct intel_engine_cs *engine;
+   enum intel_engine_id i;
int ret;
-   int i;
 
	gvt_dbg_core("init workload scheduler\n");