Re: [Intel-gfx] [PATCH 3/5] drm/i915/guc: Add GuC ADS - scheduler policies

2015-12-17 Thread Chris Wilson
On Thu, Dec 17, 2015 at 10:36:02AM -0800, Yu Dai wrote:
> On 12/16/2015 11:39 PM, Chris Wilson wrote:
> >On Wed, Dec 16, 2015 at 01:40:53PM -0800, yu@intel.com wrote:
> >> From: Alex Dai 
> >> @@ -884,6 +906,13 @@ static void guc_create_ads(struct intel_guc *guc)
> >>for_each_ring(ring, dev_priv, i)
> >>ads->eng_state_size[i] = intel_lr_context_size(ring);
> >>
> >> +  /* GuC scheduling policies */
> >> +  policies = (void *)ads + sizeof(struct guc_ads);
> >> +  init_guc_policies(policies);
> >
> >Please limit atomic context to only the critical section, i.e. don't
> >make me have to read every single function to check for violations.
> 
> Could you clarify this? I am not sure which atomic context and
> critical section you are referring to here.

This is inside a kmap_atomic()/kunmap_atomic() section. That means the
code runs in atomic context and cannot sleep or be preempted, i.e. there
are restrictions on what the code can do and on what it can call.
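
As a rough sketch (illustration only, not a concrete patch, and assuming
this setup path is otherwise allowed to sleep): do the work that may sleep
before taking the mapping, so that only the copy into the page sits between
kmap_atomic() and kunmap_atomic(). A temporary allocation is just one way
to arrange it:

	struct guc_policies *policies;

	/* Prepare the policies in normal (sleepable) context first. */
	policies = kzalloc(sizeof(*policies), GFP_KERNEL);
	if (!policies)
		return; /* error handling elided for the sketch */
	init_guc_policies(policies);

	ads = kmap_atomic(page);
	/* ... fill in the other ads fields as before ... */

	/* Only the unavoidable page access runs in atomic context. */
	memcpy((void *)ads + sizeof(struct guc_ads), policies,
	       sizeof(*policies));
	kunmap_atomic(ads);

	kfree(policies);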
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre


Re: [Intel-gfx] [PATCH 3/5] drm/i915/guc: Add GuC ADS - scheduler policies

2015-12-17 Thread Yu Dai



On 12/16/2015 11:39 PM, Chris Wilson wrote:

On Wed, Dec 16, 2015 at 01:40:53PM -0800, yu@intel.com wrote:
> From: Alex Dai 
>
> GuC supports different scheduling policies for its four internal
> queues. Currently these are all set to the same default values as
> the KMD_NORMAL queue.
>
> In particular, POLICY_MAX_NUM_WI is set to 15 to match the maximum
> size of the GuC internal submit queue and so avoid an out-of-space
> problem. This value is the maximum number of work items allowed to
> be queued for one DPC process. A smaller value makes GuC schedule
> more frequently, while a larger value may increase the chance to
> optimize commands (such as collapsing commands from the same lrc),
> at the risk of leaving the CS idle.
>
> Signed-off-by: Alex Dai 
> ---
>  drivers/gpu/drm/i915/i915_guc_submission.c | 31 +++-
>  drivers/gpu/drm/i915/intel_guc_fwif.h  | 45 ++
>  2 files changed, 75 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 66d85c3..a5c555c 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -842,17 +842,39 @@ static void guc_create_log(struct intel_guc *guc)
>guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
>  }
>
> +static void init_guc_policies(struct guc_policies *policies)
> +{
> +  struct guc_policy *policy;
> +  u32 p, i;
> +
> +  policies->dpc_promote_time = 50;
> +  policies->max_num_work_items = POLICY_MAX_NUM_WI;
> +
> +  for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++)
> +  for (i = 0; i < I915_NUM_RINGS; i++) {

Please indent this properly.

> +  policy = &policies->policy[p][i];
> +
> +  policy->execution_quantum = 100;
> +  policy->preemption_time = 50;
> +  policy->fault_time = 25;
> +  policy->policy_flags = 0;
> +  }
> +
> +  policies->is_valid = 1;
> +}
> +
>  static void guc_create_ads(struct intel_guc *guc)
>  {
>struct drm_i915_private *dev_priv = guc_to_i915(guc);
>struct drm_i915_gem_object *obj;
>struct guc_ads *ads;
> +  struct guc_policies *policies;
>struct intel_engine_cs *ring;
>struct page *page;
>u32 size, i;
>
>/* The ads obj includes the struct itself and buffers passed to GuC */
> -  size = sizeof(struct guc_ads);
> +  size = sizeof(struct guc_ads) + sizeof(struct guc_policies);
>
>obj = guc->ads_obj;
>if (!obj) {
> @@ -884,6 +906,13 @@ static void guc_create_ads(struct intel_guc *guc)
>for_each_ring(ring, dev_priv, i)
>ads->eng_state_size[i] = intel_lr_context_size(ring);
>
> +  /* GuC scheduling policies */
> +  policies = (void *)ads + sizeof(struct guc_ads);
> +  init_guc_policies(policies);

Please limit atomic context to only the critical section, i.e. don't
make me have to read every single function to check for violations.


Could you clarify this? I am not sure which atomic context and
critical section you are referring to here.


Alex

> +
> +  ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
> +  sizeof(struct guc_ads);
> +
>kunmap_atomic(ads);





Re: [Intel-gfx] [PATCH 3/5] drm/i915/guc: Add GuC ADS - scheduler policies

2015-12-16 Thread Chris Wilson
On Wed, Dec 16, 2015 at 01:40:53PM -0800, yu@intel.com wrote:
> From: Alex Dai 
> 
> GuC supports different scheduling policies for its four internal
> queues. Currently these are all set to the same default values as
> the KMD_NORMAL queue.
> 
> In particular, POLICY_MAX_NUM_WI is set to 15 to match the maximum
> size of the GuC internal submit queue and so avoid an out-of-space
> problem. This value is the maximum number of work items allowed to
> be queued for one DPC process. A smaller value makes GuC schedule
> more frequently, while a larger value may increase the chance to
> optimize commands (such as collapsing commands from the same lrc),
> at the risk of leaving the CS idle.
> 
> Signed-off-by: Alex Dai 
> ---
>  drivers/gpu/drm/i915/i915_guc_submission.c | 31 +++-
>  drivers/gpu/drm/i915/intel_guc_fwif.h  | 45 ++
>  2 files changed, 75 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 66d85c3..a5c555c 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -842,17 +842,39 @@ static void guc_create_log(struct intel_guc *guc)
>   guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
>  }
>  
> +static void init_guc_policies(struct guc_policies *policies)
> +{
> + struct guc_policy *policy;
> + u32 p, i;
> +
> + policies->dpc_promote_time = 50;
> + policies->max_num_work_items = POLICY_MAX_NUM_WI;
> +
> + for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++)
> + for (i = 0; i < I915_NUM_RINGS; i++) {

Please indent this properly.
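
Something like this, for illustration only (the exact brace placement is
up to you; the point is that the inner loop and its body get their own
indentation level):

	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
		for (i = 0; i < I915_NUM_RINGS; i++) {
			policy = &policies->policy[p][i];

			policy->execution_quantum = 100;
			policy->preemption_time = 50;
			policy->fault_time = 25;
			policy->policy_flags = 0;
		}
	}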

> + policy = &policies->policy[p][i];
> +
> + policy->execution_quantum = 100;
> + policy->preemption_time = 50;
> + policy->fault_time = 25;
> + policy->policy_flags = 0;
> + }
> +
> + policies->is_valid = 1;
> +}
> +
>  static void guc_create_ads(struct intel_guc *guc)
>  {
>   struct drm_i915_private *dev_priv = guc_to_i915(guc);
>   struct drm_i915_gem_object *obj;
>   struct guc_ads *ads;
> + struct guc_policies *policies;
>   struct intel_engine_cs *ring;
>   struct page *page;
>   u32 size, i;
>  
>   /* The ads obj includes the struct itself and buffers passed to GuC */
> - size = sizeof(struct guc_ads);
> + size = sizeof(struct guc_ads) + sizeof(struct guc_policies);
>  
>   obj = guc->ads_obj;
>   if (!obj) {
> @@ -884,6 +906,13 @@ static void guc_create_ads(struct intel_guc *guc)
>   for_each_ring(ring, dev_priv, i)
>   ads->eng_state_size[i] = intel_lr_context_size(ring);
>  
> + /* GuC scheduling policies */
> + policies = (void *)ads + sizeof(struct guc_ads);
> + init_guc_policies(policies);

Please limit atomic context to only the critical section, i.e. don't
make me have to read every single function to check for violations.
-Chris

> +
> + ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
> + sizeof(struct guc_ads);
> +
>   kunmap_atomic(ads);

-- 
Chris Wilson, Intel Open Source Technology Centre


[Intel-gfx] [PATCH 3/5] drm/i915/guc: Add GuC ADS - scheduler policies

2015-12-16 Thread yu.dai
From: Alex Dai 

GuC supports different scheduling policies for its four internal
queues. Currently these are all set to the same default values as
the KMD_NORMAL queue.

In particular, POLICY_MAX_NUM_WI is set to 15 to match the maximum
size of the GuC internal submit queue and so avoid an out-of-space
problem. This value is the maximum number of work items allowed to
be queued for one DPC process. A smaller value makes GuC schedule
more frequently, while a larger value may increase the chance to
optimize commands (such as collapsing commands from the same lrc),
at the risk of leaving the CS idle.

Signed-off-by: Alex Dai 
---
 drivers/gpu/drm/i915/i915_guc_submission.c | 31 +++-
 drivers/gpu/drm/i915/intel_guc_fwif.h  | 45 ++
 2 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 66d85c3..a5c555c 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -842,17 +842,39 @@ static void guc_create_log(struct intel_guc *guc)
guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
+static void init_guc_policies(struct guc_policies *policies)
+{
+   struct guc_policy *policy;
+   u32 p, i;
+
+   policies->dpc_promote_time = 50;
+   policies->max_num_work_items = POLICY_MAX_NUM_WI;
+
+   for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++)
+   for (i = 0; i < I915_NUM_RINGS; i++) {
+   policy = &policies->policy[p][i];
+
+   policy->execution_quantum = 100;
+   policy->preemption_time = 50;
+   policy->fault_time = 25;
+   policy->policy_flags = 0;
+   }
+
+   policies->is_valid = 1;
+}
+
 static void guc_create_ads(struct intel_guc *guc)
 {
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
struct guc_ads *ads;
+   struct guc_policies *policies;
struct intel_engine_cs *ring;
struct page *page;
u32 size, i;
 
/* The ads obj includes the struct itself and buffers passed to GuC */
-   size = sizeof(struct guc_ads);
+   size = sizeof(struct guc_ads) + sizeof(struct guc_policies);
 
obj = guc->ads_obj;
if (!obj) {
@@ -884,6 +906,13 @@ static void guc_create_ads(struct intel_guc *guc)
for_each_ring(ring, dev_priv, i)
ads->eng_state_size[i] = intel_lr_context_size(ring);
 
+   /* GuC scheduling policies */
+   policies = (void *)ads + sizeof(struct guc_ads);
+   init_guc_policies(policies);
+
+   ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
+   sizeof(struct guc_ads);
+
kunmap_atomic(ads);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 76ecc85..0cc17c7 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,6 +39,7 @@
 #define GUC_CTX_PRIORITY_HIGH  1
 #define GUC_CTX_PRIORITY_KMD_NORMAL2
 #define GUC_CTX_PRIORITY_NORMAL3
+#define GUC_CTX_PRIORITY_NUM   4
 
 #define GUC_MAX_GPU_CONTEXTS   1024
 #define GUC_INVALID_CTX_ID  GUC_MAX_GPU_CONTEXTS
@@ -316,6 +317,50 @@ struct guc_context_desc {
 #define GUC_POWER_D2   3
 #define GUC_POWER_D3   4
 
+/* Scheduling policy settings */
+
+/* Reset engine upon preempt failure */
+#define POLICY_RESET_ENGINE(1<<0)
+/* Preempt to idle on quantum expiry */
+#define POLICY_PREEMPT_TO_IDLE (1<<1)
+
+#define POLICY_MAX_NUM_WI  15
+
+struct guc_policy {
+   /* Time for one workload to execute (in microseconds). */
+   u32 execution_quantum;
+   u32 reserved1;
+
+   /* Time to wait for a preemption request to complete before issuing a
+* reset (in microseconds). */
+   u32 preemption_time;
+
+   /* How long to keep running after the first fault is observed,
+* before preempting (in microseconds). */
+   u32 fault_time;
+
+   u32 policy_flags;
+   u32 reserved[2];
+} __packed;
+
+struct guc_policies {
+   struct guc_policy policy[GUC_CTX_PRIORITY_NUM][I915_NUM_RINGS];
+
+   /* How long (in microseconds) to allow before DPC processing is
+* called back via interrupt (to prevent DPC queue drain starvation).
+* Typically thousands of microseconds (example only, not granularity). */
+   u32 dpc_promote_time;
+
+   /* Must be set to take these new values. */
+   u32 is_valid;
+
+   /* Max number of work items (WIs) to process per call. A large value
+* may keep the CS idle. */
+   u32 max_num_work_items;
+
+   u32 reserved[19];
+} __packed;
+
 /* GuC Additional Data Struct */
 
 struct guc_ads {
-- 
2.5.0
