Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Wed, Aug 31, 2022 at 01:25:11PM -0700, Dixit, Ashutosh wrote: On Fri, 26 Aug 2022 09:33:08 -0700, Umesh Nerlige Ramappa wrote: Hi Umesh, Just to communicate my thoughts I have posted this patch on top of your patch: [1] https://patchwork.freedesktop.org/series/107983/ Could you please take a look at that and see if it makes sense. On Thu, Aug 25, 2022 at 06:44:50PM -0700, Dixit, Ashutosh wrote: > On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: > > Hi Umesh, I am fairly new to this code so some questions will be below will > be newbie questions, thanks for bearing with me. > >> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c >> index 654a092ed3d6..e2d70a9fdac0 100644 >> --- a/drivers/gpu/drm/i915/gt/intel_context.c >> +++ b/drivers/gpu/drm/i915/gt/intel_context.c >> @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, >>child->parallel.parent = parent; >> } >> >> -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) >> +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) >> { >>u64 total, active; >> >> + if (ce->ops->update_stats) >> + ce->ops->update_stats(ce); >> + >>total = ce->stats.runtime.total; >>if (ce->ops->flags & COPS_RUNTIME_CYCLES) >>total *= ce->engine->gt->clock_period_ns; >> >>active = READ_ONCE(ce->stats.active); >> - if (active) >> + /* >> + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend >> + * already provides the total active time of the context, so skip this >> + * calculation when this flag is set. >> + */ >> + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) >>active = intel_context_clock() - active; >> >>return total + active; > > /snip/ > >> @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) >>with_intel_runtime_pm(>i915->runtime_pm, wakeref) >>__update_guc_busyness_stats(guc); >> >> + /* adjust context stats for overflow */ >> + xa_for_each(>context_lookup, index, ce) >> + __guc_context_update_clks(ce); > > What is the reason for calling __guc_context_update_clks() periodically > from guc_timestamp_ping() since it appears we should just be able to call > __guc_context_update_clks() from intel_context_get_total_runtime_ns() to > update 'active'? Is the reason for calling __guc_context_update_clks() > periodically that the calculations in __guc_context_update_clks() become > invalid if the counters overflow? Correct, these are 32-bit counters and the worker just tracks overflow. OK. > >> + >>intel_gt_reset_unlock(gt, srcu); >> >>mod_delayed_work(system_highpri_wq, >timestamp.work, >> @@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) >> guc->timestamp.ping_delay); >> } >> >> +static void __guc_context_update_clks(struct intel_context *ce) >> +{ >> + struct intel_guc *guc = ce_to_guc(ce); >> + struct intel_gt *gt = ce->engine->gt; >> + u32 *pphwsp, last_switch, engine_id; >> + u64 start_gt_clk, active; >> + unsigned long flags; >> + ktime_t unused; >> + >> + spin_lock_irqsave(>timestamp.lock, flags); >> + >> + /* >> + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched >> + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) >> + * relies on GuC and GPU for busyness calculations. Due to this, A >> + * potential race was highlighted in an earlier review that can lead to >> + * double accounting of busyness. While the solution to this is a wip, >> + * busyness is still usable for platforms running GuC submission. 
>> + */
>> + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET;
>> + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]);
>> + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]);
>> +
>> + guc_update_pm_timestamp(guc, &unused);
>> +
>> + if (engine_id != 0xffffffff && last_switch) {
>> + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk);
>> + __extend_last_switch(guc, &start_gt_clk, last_switch);
>> + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk);
>> + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk);
>> + WRITE_ONCE(ce->stats.active, active);
>
> Should not need WRITE_ONCE to update regular memory. Not even sure we need
> READ_ONCE above.

Not sure I checked what they do. I was thinking these are needed for the memory ordering (as in be sure that start_gt_clk is updated before active).

As long as our operations are done under correct locks we don't have to worry about memory ordering. That is one of the reasons I am doing everything under the spinlock in [1].
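For reference, the counters being discussed here (the PPHWSP switch-in stamp and the GT timestamp) are 32-bit values, so the ping worker has to sample them at least once per wrap period and carry the upper bits in software; that is the job __extend_last_switch() does for start_gt_clk. A minimal, self-contained sketch of that extension technique follows; it is illustrative only and not the driver's exact helper:

#include <stdint.h>

/*
 * Extend a wrapping 32-bit hardware counter into a monotonically
 * increasing 64-bit value.  "prev" is the last extended value we
 * computed; "sample" is the newest 32-bit reading.  If the low word
 * went backwards, the counter must have wrapped since the previous
 * sample, so bump the high word.  This only works if samples are
 * taken more often than one full wrap period - which is exactly why
 * the worker has to run periodically.
 */
static uint64_t extend_counter32(uint64_t prev, uint32_t sample)
{
	uint64_t hi = prev & 0xffffffff00000000ull;

	if (sample < (uint32_t)prev)	/* wrapped since last sample */
		hi += 1ull << 32;

	return hi | sample;
}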
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Fri, 26 Aug 2022 09:33:08 -0700, Umesh Nerlige Ramappa wrote: > Hi Umesh, Just to communicate my thoughts I have posted this patch on top of your patch: [1] https://patchwork.freedesktop.org/series/107983/ Could you please take a look at that and see if it makes sense. > On Thu, Aug 25, 2022 at 06:44:50PM -0700, Dixit, Ashutosh wrote: > > On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: > > > > Hi Umesh, I am fairly new to this code so some questions will be below will > > be newbie questions, thanks for bearing with me. > > > >> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c > >> b/drivers/gpu/drm/i915/gt/intel_context.c > >> index 654a092ed3d6..e2d70a9fdac0 100644 > >> --- a/drivers/gpu/drm/i915/gt/intel_context.c > >> +++ b/drivers/gpu/drm/i915/gt/intel_context.c > >> @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct > >> intel_context *parent, > >>child->parallel.parent = parent; > >> } > >> > >> -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) > >> +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) > >> { > >>u64 total, active; > >> > >> + if (ce->ops->update_stats) > >> + ce->ops->update_stats(ce); > >> + > >>total = ce->stats.runtime.total; > >>if (ce->ops->flags & COPS_RUNTIME_CYCLES) > >>total *= ce->engine->gt->clock_period_ns; > >> > >>active = READ_ONCE(ce->stats.active); > >> - if (active) > >> + /* > >> + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend > >> + * already provides the total active time of the context, so skip this > >> + * calculation when this flag is set. > >> + */ > >> + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) > >>active = intel_context_clock() - active; > >> > >>return total + active; > > > > /snip/ > > > >> @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct > >> *wrk) > >>with_intel_runtime_pm(>i915->runtime_pm, wakeref) > >>__update_guc_busyness_stats(guc); > >> > >> + /* adjust context stats for overflow */ > >> + xa_for_each(>context_lookup, index, ce) > >> + __guc_context_update_clks(ce); > > > > What is the reason for calling __guc_context_update_clks() periodically > > from guc_timestamp_ping() since it appears we should just be able to call > > __guc_context_update_clks() from intel_context_get_total_runtime_ns() to > > update 'active'? Is the reason for calling __guc_context_update_clks() > > periodically that the calculations in __guc_context_update_clks() become > > invalid if the counters overflow? > > Correct, these are 32-bit counters and the worker just tracks overflow. OK. > > > > >> + > >>intel_gt_reset_unlock(gt, srcu); > >> > >>mod_delayed_work(system_highpri_wq, >timestamp.work, > >> @@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) > >> guc->timestamp.ping_delay); > >> } > >> > >> +static void __guc_context_update_clks(struct intel_context *ce) > >> +{ > >> + struct intel_guc *guc = ce_to_guc(ce); > >> + struct intel_gt *gt = ce->engine->gt; > >> + u32 *pphwsp, last_switch, engine_id; > >> + u64 start_gt_clk, active; > >> + unsigned long flags; > >> + ktime_t unused; > >> + > >> + spin_lock_irqsave(>timestamp.lock, flags); > >> + > >> + /* > >> + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched > >> + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) > >> + * relies on GuC and GPU for busyness calculations. Due to this, A > >> + * potential race was highlighted in an earlier review that can lead to > >> + * double accounting of busyness. 
While the solution to this is a wip, > >> + * busyness is still usable for platforms running GuC submission. > >> + */ > >> + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; > >> + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); > >> + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); > >> + > >> + guc_update_pm_timestamp(guc, ); > >> + > >> + if (engine_id != 0x && last_switch) { > >> + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); > >> + __extend_last_switch(guc, _gt_clk, last_switch); > >> + active = intel_gt_clock_interval_to_ns(gt, > >> guc->timestamp.gt_stamp - start_gt_clk); > >> + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); > >> + WRITE_ONCE(ce->stats.active, active); > > > > Should not need WRITE_ONCE to update regular memory. Not even sure we need > > READ_ONCE above. > > Not sure I checked what they do. I was thinking these are needed for the > memory ordering (as in be sure that start_gt_clk is updated before > active). As long as our operations are done under correct locks we don't have to worry about memory ordering. That is one of the reasons I am doing everything under the spinlock in [1]. > > > > >> + } else { > >> +
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Thu, Aug 25, 2022 at 06:44:50PM -0700, Dixit, Ashutosh wrote: On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: Hi Umesh, I am fairly new to this code so some questions will be below will be newbie questions, thanks for bearing with me. diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 654a092ed3d6..e2d70a9fdac0 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* +* When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend +* already provides the total active time of the context, so skip this +* calculation when this flag is set. +*/ + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) active = intel_context_clock() - active; return total + active; /snip/ @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) with_intel_runtime_pm(>i915->runtime_pm, wakeref) __update_guc_busyness_stats(guc); + /* adjust context stats for overflow */ + xa_for_each(>context_lookup, index, ce) + __guc_context_update_clks(ce); What is the reason for calling __guc_context_update_clks() periodically from guc_timestamp_ping() since it appears we should just be able to call __guc_context_update_clks() from intel_context_get_total_runtime_ns() to update 'active'? Is the reason for calling __guc_context_update_clks() periodically that the calculations in __guc_context_update_clks() become invalid if the counters overflow? Correct, these are 32-bit counters and the worker just tracks overflow. + intel_gt_reset_unlock(gt, srcu); mod_delayed_work(system_highpri_wq, >timestamp.work, @@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) guc->timestamp.ping_delay); } +static void __guc_context_update_clks(struct intel_context *ce) +{ + struct intel_guc *guc = ce_to_guc(ce); + struct intel_gt *gt = ce->engine->gt; + u32 *pphwsp, last_switch, engine_id; + u64 start_gt_clk, active; + unsigned long flags; + ktime_t unused; + + spin_lock_irqsave(>timestamp.lock, flags); + + /* +* GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched +* out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) +* relies on GuC and GPU for busyness calculations. Due to this, A +* potential race was highlighted in an earlier review that can lead to +* double accounting of busyness. While the solution to this is a wip, +* busyness is still usable for platforms running GuC submission. 
+*/ + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); + + guc_update_pm_timestamp(guc, ); + + if (engine_id != 0x && last_switch) { + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); + __extend_last_switch(guc, _gt_clk, last_switch); + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); + WRITE_ONCE(ce->stats.active, active); Should not need WRITE_ONCE to update regular memory. Not even sure we need READ_ONCE above. Not sure I checked what they do. I was thinking these are needed for the memory ordering (as in be sure that start_gt_clk is updated before active). + } else { + lrc_update_runtime(ce); As was being discussed, should not need this here in this function. See below too. In short, I added this here so that a query for busyness following idle can be obtained immediately. For GuC backend, the context is unpinned after disabling scheduling on that context and that is asynchronous. Also if there are more requests on that context, the scheduling may not be disabled and unpin may not happen, so updated runtime would only be seen much much later. It is still safe to call from here because we know that the context is not active and has switched out. If it did switch in while we were reading this, that's still fine, we
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Wed, Aug 24, 2022 at 06:17:19PM -0700, Dixit, Ashutosh wrote:

On Fri, 05 Aug 2022 08:18:48 -0700, Umesh Nerlige Ramappa wrote:

On Fri, Aug 05, 2022 at 10:45:30AM +0100, Tvrtko Ursulin wrote:
>
> On 05/08/2022 00:21, Umesh Nerlige Ramappa wrote:
>> -static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
>> +static inline struct intel_guc *ce_to_guc(const struct intel_context *ce)
>
> This is odd since the helper now takes away constness. I can't really
> figure out why the change is needed?

Hi Umesh, I am also wondering about this, I think you missed answering this question from Tvrtko.

This helper 'adds' constness, so wasn't sure if the comment was intended for this helper.

Thanks,
Umesh

Thanks.
--
Ashutosh
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: Hi Umesh, I am fairly new to this code so some questions will be below will be newbie questions, thanks for bearing with me. > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c > b/drivers/gpu/drm/i915/gt/intel_context.c > index 654a092ed3d6..e2d70a9fdac0 100644 > --- a/drivers/gpu/drm/i915/gt/intel_context.c > +++ b/drivers/gpu/drm/i915/gt/intel_context.c > @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct > intel_context *parent, > child->parallel.parent = parent; > } > > -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) > +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) > { > u64 total, active; > > + if (ce->ops->update_stats) > + ce->ops->update_stats(ce); > + > total = ce->stats.runtime.total; > if (ce->ops->flags & COPS_RUNTIME_CYCLES) > total *= ce->engine->gt->clock_period_ns; > > active = READ_ONCE(ce->stats.active); > - if (active) > + /* > + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend > + * already provides the total active time of the context, so skip this > + * calculation when this flag is set. > + */ > + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) > active = intel_context_clock() - active; > > return total + active; /snip/ > @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) > with_intel_runtime_pm(>i915->runtime_pm, wakeref) > __update_guc_busyness_stats(guc); > > + /* adjust context stats for overflow */ > + xa_for_each(>context_lookup, index, ce) > + __guc_context_update_clks(ce); What is the reason for calling __guc_context_update_clks() periodically from guc_timestamp_ping() since it appears we should just be able to call __guc_context_update_clks() from intel_context_get_total_runtime_ns() to update 'active'? Is the reason for calling __guc_context_update_clks() periodically that the calculations in __guc_context_update_clks() become invalid if the counters overflow? > + > intel_gt_reset_unlock(gt, srcu); > > mod_delayed_work(system_highpri_wq, >timestamp.work, > @@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) >guc->timestamp.ping_delay); > } > > +static void __guc_context_update_clks(struct intel_context *ce) > +{ > + struct intel_guc *guc = ce_to_guc(ce); > + struct intel_gt *gt = ce->engine->gt; > + u32 *pphwsp, last_switch, engine_id; > + u64 start_gt_clk, active; > + unsigned long flags; > + ktime_t unused; > + > + spin_lock_irqsave(>timestamp.lock, flags); > + > + /* > + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched > + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) > + * relies on GuC and GPU for busyness calculations. Due to this, A > + * potential race was highlighted in an earlier review that can lead to > + * double accounting of busyness. While the solution to this is a wip, > + * busyness is still usable for platforms running GuC submission. 
> + */ > + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; > + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); > + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); > + > + guc_update_pm_timestamp(guc, ); > + > + if (engine_id != 0x && last_switch) { > + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); > + __extend_last_switch(guc, _gt_clk, last_switch); > + active = intel_gt_clock_interval_to_ns(gt, > guc->timestamp.gt_stamp - start_gt_clk); > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); > + WRITE_ONCE(ce->stats.active, active); Should not need WRITE_ONCE to update regular memory. Not even sure we need READ_ONCE above. > + } else { > + lrc_update_runtime(ce); As was being discussed, should not need this here in this function. See below too. > + } > + > + spin_unlock_irqrestore(>timestamp.lock, flags); > +} > + > +static void guc_context_update_stats(struct intel_context *ce) > +{ > + if (!intel_context_pin_if_active(ce)) { > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); > + WRITE_ONCE(ce->stats.active, 0); Why do these need to be initialized to 0? Looks like the calculations in __guc_context_update_clks() will work even if we don't do this? Also I didn't follow the 'if (!intel_context_pin_if_active(ce))' check. > + return; > + } > + > + __guc_context_update_clks(ce); > + intel_context_unpin(ce); > +} > + > static inline bool > submission_disabled(struct intel_guc *guc) > { > @@ -2723,6 +2780,7 @@ static void guc_context_unpin(struct intel_context *ce) > { > struct
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Wed, 24 Aug 2022 22:03:19 -0700, Dixit, Ashutosh wrote: > > On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: > > > > Hi Umesh, > > Still reviewing but I have a question below. Please ignore this mail for now, mostly a result of my misunderstanding the code. I will ask again if I have any questions. Thanks. > > > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c > > b/drivers/gpu/drm/i915/gt/intel_context.c > > index 654a092ed3d6..e2d70a9fdac0 100644 > > --- a/drivers/gpu/drm/i915/gt/intel_context.c > > +++ b/drivers/gpu/drm/i915/gt/intel_context.c > > @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct > > intel_context *parent, > > child->parallel.parent = parent; > > } > > > > -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) > > +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) > > { > > u64 total, active; > > > > + if (ce->ops->update_stats) > > + ce->ops->update_stats(ce); > > + > > /snip/ > > > @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct > > *wrk) > > with_intel_runtime_pm(>i915->runtime_pm, wakeref) > > __update_guc_busyness_stats(guc); > > > > + /* adjust context stats for overflow */ > > + xa_for_each(>context_lookup, index, ce) > > + __guc_context_update_clks(ce); > > + > > The question is why do we have 2 functions: __guc_context_update_clks() > (which we call periodically from guc_timestamp_ping()) and > guc_context_update_stats() (which we call non-periodically from > intel_context_get_total_runtime_ns()? Why don't we have just one function > which is called from both places? Or rather why don't we call > guc_context_update_stats() from both places? > > If we don't call guc_context_update_stats() periodically from > guc_timestamp_ping() how e.g. does ce->stats.runtime.start_gt_clk get reset > to 0? If it gets reset to 0 in __guc_context_update_clks() then why do we > need to reset it in guc_context_update_stats()? > > Also IMO guc->timestamp.lock should be taken by this single function, > (otherwise guc_context_update_stats() is modifying > ce->stats.runtime.start_gt_clk without taking the lock). > > Thanks. > -- > Ashutosh > > > +static void __guc_context_update_clks(struct intel_context *ce) > > +{ > > + struct intel_guc *guc = ce_to_guc(ce); > > + struct intel_gt *gt = ce->engine->gt; > > + u32 *pphwsp, last_switch, engine_id; > > + u64 start_gt_clk, active; > > + unsigned long flags; > > + ktime_t unused; > > + > > + spin_lock_irqsave(>timestamp.lock, flags); > > + > > + /* > > +* GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched > > +* out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) > > +* relies on GuC and GPU for busyness calculations. Due to this, A > > +* potential race was highlighted in an earlier review that can lead to > > +* double accounting of busyness. While the solution to this is a wip, > > +* busyness is still usable for platforms running GuC submission. 
> > +*/ > > + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; > > + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); > > + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); > > + > > + guc_update_pm_timestamp(guc, ); > > + > > + if (engine_id != 0x && last_switch) { > > + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); > > + __extend_last_switch(guc, _gt_clk, last_switch); > > + active = intel_gt_clock_interval_to_ns(gt, > > guc->timestamp.gt_stamp - start_gt_clk); > > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); > > + WRITE_ONCE(ce->stats.active, active); > > + } else { > > + lrc_update_runtime(ce); > > + } > > + > > + spin_unlock_irqrestore(>timestamp.lock, flags); > > +} > > + > > +static void guc_context_update_stats(struct intel_context *ce) > > +{ > > + if (!intel_context_pin_if_active(ce)) { > > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); > > + WRITE_ONCE(ce->stats.active, 0); > > + return; > > + } > > + > > + __guc_context_update_clks(ce); > > + intel_context_unpin(ce); > > +}
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Wed, 15 Jun 2022 10:42:08 -0700, Umesh Nerlige Ramappa wrote: > > +static void __guc_context_update_clks(struct intel_context *ce) > +{ > + struct intel_guc *guc = ce_to_guc(ce); > + struct intel_gt *gt = ce->engine->gt; > + u32 *pphwsp, last_switch, engine_id; > + u64 start_gt_clk = 0, active = 0; > >>> > >>> No need to init these two. > >>> > + unsigned long flags; > + ktime_t unused; > + > + spin_lock_irqsave(>timestamp.lock, flags); > + > + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; > + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); > + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); > + > + guc_update_pm_timestamp(guc, ); > + > + if (engine_id != 0x && last_switch) { > + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); > + __extend_last_switch(guc, _gt_clk, last_switch); > + active = intel_gt_clock_interval_to_ns(gt, > guc->timestamp.gt_stamp - start_gt_clk); > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); > + WRITE_ONCE(ce->stats.active, active); > + } else { > + lrc_update_runtime(ce); > >>> > >>> Why is this called from here? Presumably it was called already from > >>> guc_context_unpin if here code things context is not active. Or will be > >>> called shortly, once context save is done. > >> > >> guc_context_unpin is only called in the path of ce->sched_disable. The > >> sched_disable is implemented in GuC (H2G message). Once the > >> corresponding G2H response is received, the context is actually > >> unpinned, eventually calling guc_context_unpin. Also the context may not > >> necessarily be disabled after each context exit. > > > > So if I understand correctly, lrc runtime is only updated if someone is > > reading the busyness and not as part of normal context state transitions? > > If you mean context_in/out events (like csb interrupts), only GuC can see > those events. KMD has no visibility into that. These 3 paths call > lrc_update_runtime. > > user query: (engine_id != 0x && last_switch) translates to GuC > being within context_in and context_out events, so updating it outside of > this window is one way to report the correct busyness. > > worker: guc_timestamp_ping() also updates context stats (infrequently) for > all contexts primarily to take care of overflows. > > context unpin: Existing code calls lrc_update_runtime only when unpinning > the context which takes care of accumulating busyness when requests are > retired. Will adding lrc_update_runtime() to lrc_unpin() work?
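To picture the suggestion at the end of the message above (accumulate CTX_TIMESTAMP whenever the context image is unpinned, not only on the GuC scheduling-disable path), here is a hedged sketch of the idea. The function name is hypothetical, the existing body of lrc_unpin() is elided, and this is not the code that was merged:

/* Sketch only: fold the runtime accumulation into the unpin path. */
static void lrc_unpin_with_runtime(struct intel_context *ce)
{
	/*
	 * Accumulate context[CTX_TIMESTAMP] into ce->stats.runtime.total
	 * while the context image is still mapped; after unpin the
	 * register state may no longer be accessible.
	 */
	lrc_update_runtime(ce);

	/* ... the rest of the existing lrc_unpin() body ... */
}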
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: > Hi Umesh, Still reviewing but I have a question below. > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c > b/drivers/gpu/drm/i915/gt/intel_context.c > index 654a092ed3d6..e2d70a9fdac0 100644 > --- a/drivers/gpu/drm/i915/gt/intel_context.c > +++ b/drivers/gpu/drm/i915/gt/intel_context.c > @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct > intel_context *parent, > child->parallel.parent = parent; > } > > -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) > +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) > { > u64 total, active; > > + if (ce->ops->update_stats) > + ce->ops->update_stats(ce); > + /snip/ > @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) > with_intel_runtime_pm(>i915->runtime_pm, wakeref) > __update_guc_busyness_stats(guc); > > + /* adjust context stats for overflow */ > + xa_for_each(>context_lookup, index, ce) > + __guc_context_update_clks(ce); > + The question is why do we have 2 functions: __guc_context_update_clks() (which we call periodically from guc_timestamp_ping()) and guc_context_update_stats() (which we call non-periodically from intel_context_get_total_runtime_ns()? Why don't we have just one function which is called from both places? Or rather why don't we call guc_context_update_stats() from both places? If we don't call guc_context_update_stats() periodically from guc_timestamp_ping() how e.g. does ce->stats.runtime.start_gt_clk get reset to 0? If it gets reset to 0 in __guc_context_update_clks() then why do we need to reset it in guc_context_update_stats()? Also IMO guc->timestamp.lock should be taken by this single function, (otherwise guc_context_update_stats() is modifying ce->stats.runtime.start_gt_clk without taking the lock). Thanks. -- Ashutosh > +static void __guc_context_update_clks(struct intel_context *ce) > +{ > + struct intel_guc *guc = ce_to_guc(ce); > + struct intel_gt *gt = ce->engine->gt; > + u32 *pphwsp, last_switch, engine_id; > + u64 start_gt_clk, active; > + unsigned long flags; > + ktime_t unused; > + > + spin_lock_irqsave(>timestamp.lock, flags); > + > + /* > + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched > + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) > + * relies on GuC and GPU for busyness calculations. Due to this, A > + * potential race was highlighted in an earlier review that can lead to > + * double accounting of busyness. While the solution to this is a wip, > + * busyness is still usable for platforms running GuC submission. 
> + */ > + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; > + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); > + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); > + > + guc_update_pm_timestamp(guc, ); > + > + if (engine_id != 0x && last_switch) { > + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); > + __extend_last_switch(guc, _gt_clk, last_switch); > + active = intel_gt_clock_interval_to_ns(gt, > guc->timestamp.gt_stamp - start_gt_clk); > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); > + WRITE_ONCE(ce->stats.active, active); > + } else { > + lrc_update_runtime(ce); > + } > + > + spin_unlock_irqrestore(>timestamp.lock, flags); > +} > + > +static void guc_context_update_stats(struct intel_context *ce) > +{ > + if (!intel_context_pin_if_active(ce)) { > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); > + WRITE_ONCE(ce->stats.active, 0); > + return; > + } > + > + __guc_context_update_clks(ce); > + intel_context_unpin(ce); > +}
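To make the locking suggestion above concrete, here is a hedged sketch of a single entry point that takes guc->timestamp.lock itself and could then be called both from the ping worker and from intel_context_get_total_runtime_ns(). Names follow the helpers already quoted in this thread; this is an illustration of the suggestion, not the patch under review:

/* Sketch: one update path, with the lock taken in exactly one place. */
static void guc_context_update_stats(struct intel_context *ce)
{
	struct intel_guc *guc = ce_to_guc(ce);
	unsigned long flags;

	/* Nothing to sample if the context image is not pinned/active. */
	if (!intel_context_pin_if_active(ce))
		return;

	spin_lock_irqsave(&guc->timestamp.lock, flags);
	__guc_context_update_clks(ce);	/* assumed not to take the lock itself in this variant */
	spin_unlock_irqrestore(&guc->timestamp.lock, flags);

	intel_context_unpin(ce);
}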
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Fri, Aug 05, 2022 at 10:45:30AM +0100, Tvrtko Ursulin wrote: On 05/08/2022 00:21, Umesh Nerlige Ramappa wrote: From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. Remaining work: Enable and test context busyness for virtual_parent_context_ops and virtual_child_context_ops. I meant track the IGT work in the jira internally. :) Oh, I did do that and added this here as well. Note that I have not enabled the busyness in i915 for the parent/child context ops since I was not able to verify it yet. Otherwise: Acked-by: Tvrtko Ursulin Also, can someone else please do the full review? I'm afraid with the passage of time I forgot what little I knew about how GuC tracks this data. :( I will ask around Some nits and questions below. v2: (Tvrtko) - Use COPS_RUNTIME_ACTIVE_TOTAL - Add code comment for the race - Undo local variables initializations v3: - Add support for virtual engines based on https://patchwork.freedesktop.org/series/105227/ v4: - Update commit message with remaining work. - Rebase Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 12 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 65 ++- drivers/gpu/drm/i915/i915_drm_client.c| 6 +- 6 files changed, 89 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 654a092ed3d6..e2d70a9fdac0 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* +* When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend +* already provides the total active time of the context, so skip this +* calculation when this flag is set. 
+*/ + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 8e2d70630c49..3d1d7436c1a4 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -58,7 +58,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -118,7 +118,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. */ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(>pin_count); } @@ -362,7 +362,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, >flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 04eacae1aca5..f7ff4c7d81c7 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On 05/08/2022 00:21, Umesh Nerlige Ramappa wrote: From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. Remaining work: Enable and test context busyness for virtual_parent_context_ops and virtual_child_context_ops. I meant track the IGT work in the jira internally. :) Otherwise: Acked-by: Tvrtko Ursulin Also, can someone else please do the full review? I'm afraid with the passage of time I forgot what little I knew about how GuC tracks this data. :( Some nits and questions below. v2: (Tvrtko) - Use COPS_RUNTIME_ACTIVE_TOTAL - Add code comment for the race - Undo local variables initializations v3: - Add support for virtual engines based on https://patchwork.freedesktop.org/series/105227/ v4: - Update commit message with remaining work. - Rebase Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 12 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 65 ++- drivers/gpu/drm/i915/i915_drm_client.c| 6 +- 6 files changed, 89 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 654a092ed3d6..e2d70a9fdac0 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* +* When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend +* already provides the total active time of the context, so skip this +* calculation when this flag is set. 
+*/ + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 8e2d70630c49..3d1d7436c1a4 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -58,7 +58,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -118,7 +118,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. */ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(>pin_count); } @@ -362,7 +362,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, >flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 04eacae1aca5..f7ff4c7d81c7 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -38,6 +38,9 @@ struct intel_context_ops { #define COPS_RUNTIME_CYCLES_BIT 1 #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) +#define COPS_RUNTIME_ACTIVE_TOTAL_BIT
[Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. Remaining work: Enable and test context busyness for virtual_parent_context_ops and virtual_child_context_ops. v2: (Tvrtko) - Use COPS_RUNTIME_ACTIVE_TOTAL - Add code comment for the race - Undo local variables initializations v3: - Add support for virtual engines based on https://patchwork.freedesktop.org/series/105227/ v4: - Update commit message with remaining work. - Rebase Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 12 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 65 ++- drivers/gpu/drm/i915/i915_drm_client.c| 6 +- 6 files changed, 89 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 654a092ed3d6..e2d70a9fdac0 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* +* When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend +* already provides the total active time of the context, so skip this +* calculation when this flag is set. +*/ + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 8e2d70630c49..3d1d7436c1a4 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -58,7 +58,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -118,7 +118,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. 
*/ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(>pin_count); } @@ -362,7 +362,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, >flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 04eacae1aca5..f7ff4c7d81c7 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -38,6 +38,9 @@ struct intel_context_ops { #define COPS_RUNTIME_CYCLES_BIT 1 #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) +#define COPS_RUNTIME_ACTIVE_TOTAL_BIT 2 +#define COPS_RUNTIME_ACTIVE_TOTAL BIT(COPS_RUNTIME_ACTIVE_TOTAL_BIT) + int (*alloc)(struct intel_context *ce); void (*revoke)(struct intel_context *ce, struct i915_request *rq, @@ -56,6 +59,8 @@ struct intel_context_ops { void (*sched_disable)(struct intel_context *ce); + void (*update_stats)(struct intel_context *ce);
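The busyness rule in the commit message reduces to a small piece of arithmetic: an active context reports its accumulated ticks plus the time since it last switched in, an idle context reports only the accumulated ticks. A hedged, illustrative-only restatement (the names below are not the driver's API):

#include <stdbool.h>
#include <stdint.h>

/*
 * If the context is currently resident on an engine (engine_id valid),
 * busyness is what it has already accumulated plus the time it has been
 * running since it last switched in, measured against the current GT
 * time.  Otherwise only the accumulated total counts.
 */
static uint64_t context_busyness_ticks(uint64_t total_ticks,
				       uint64_t last_switch_in,
				       uint64_t gt_now,
				       bool engine_id_valid)
{
	if (engine_id_valid)
		return total_ticks + (gt_now - last_switch_in);

	return total_ticks;
}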
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On 04/08/2022 02:21, Umesh Nerlige Ramappa wrote: On Tue, Aug 02, 2022 at 04:38:45PM -0700, Umesh Nerlige Ramappa wrote: On Tue, Aug 02, 2022 at 09:41:38AM +0100, Tvrtko Ursulin wrote: On 01/08/2022 20:02, Umesh Nerlige Ramappa wrote: On Wed, Jul 27, 2022 at 09:48:18AM +0100, Tvrtko Ursulin wrote: On 27/07/2022 07:01, Umesh Nerlige Ramappa wrote: On Fri, Jun 17, 2022 at 09:00:06AM +0100, Tvrtko Ursulin wrote: On 16/06/2022 23:13, Nerlige Ramappa, Umesh wrote: From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. v2: (Tvrtko) - Use COPS_RUNTIME_ACTIVE_TOTAL - Add code comment for the race - Undo local variables initializations v3: - Add support for virtual engines based on https://patchwork.freedesktop.org/series/105227/ Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 12 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 65 ++- drivers/gpu/drm/i915/i915_drm_client.c | 6 +- 6 files changed, 89 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4070cb5711d8..4a84146710e0 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend + * already provides the total active time of the context, so skip this + * calculation when this flag is set. 
+ */ + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b7d3214d2cdd..5fc7c19ab29b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -56,7 +56,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -116,7 +116,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. */ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(>pin_count); } @@ -351,7 +351,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, >flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 09f82545789f..797bb4242c18 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -38,6 +38,9 @@ struct intel_context_ops { #define COPS_RUNTIME_CYCLES_BIT 1 #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) +#define COPS_RUNTIME_ACTIVE_TOTAL_BIT 2 +#define COPS_RUNTIME_ACTIVE_TOTAL BIT(COPS_RUNTIME_ACTIVE_TOTAL_BIT) + int (*alloc)(struct
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Tue, Aug 02, 2022 at 04:38:45PM -0700, Umesh Nerlige Ramappa wrote: On Tue, Aug 02, 2022 at 09:41:38AM +0100, Tvrtko Ursulin wrote: On 01/08/2022 20:02, Umesh Nerlige Ramappa wrote: On Wed, Jul 27, 2022 at 09:48:18AM +0100, Tvrtko Ursulin wrote: On 27/07/2022 07:01, Umesh Nerlige Ramappa wrote: On Fri, Jun 17, 2022 at 09:00:06AM +0100, Tvrtko Ursulin wrote: On 16/06/2022 23:13, Nerlige Ramappa, Umesh wrote: From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. v2: (Tvrtko) - Use COPS_RUNTIME_ACTIVE_TOTAL - Add code comment for the race - Undo local variables initializations v3: - Add support for virtual engines based on https://patchwork.freedesktop.org/series/105227/ Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 12 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 65 ++- drivers/gpu/drm/i915/i915_drm_client.c | 6 +- 6 files changed, 89 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4070cb5711d8..4a84146710e0 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend + * already provides the total active time of the context, so skip this + * calculation when this flag is set. 
+ */ + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b7d3214d2cdd..5fc7c19ab29b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -56,7 +56,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -116,7 +116,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. */ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(>pin_count); } @@ -351,7 +351,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, >flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 09f82545789f..797bb4242c18 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -38,6 +38,9 @@ struct intel_context_ops { #define COPS_RUNTIME_CYCLES_BIT 1 #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) +#define COPS_RUNTIME_ACTIVE_TOTAL_BIT 2 +#define COPS_RUNTIME_ACTIVE_TOTAL BIT(COPS_RUNTIME_ACTIVE_TOTAL_BIT) + int (*alloc)(struct intel_context *ce); void (*ban)(struct intel_context *ce,
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Tue, Aug 02, 2022 at 09:41:38AM +0100, Tvrtko Ursulin wrote: On 01/08/2022 20:02, Umesh Nerlige Ramappa wrote: On Wed, Jul 27, 2022 at 09:48:18AM +0100, Tvrtko Ursulin wrote: On 27/07/2022 07:01, Umesh Nerlige Ramappa wrote: On Fri, Jun 17, 2022 at 09:00:06AM +0100, Tvrtko Ursulin wrote: On 16/06/2022 23:13, Nerlige Ramappa, Umesh wrote: From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. v2: (Tvrtko) - Use COPS_RUNTIME_ACTIVE_TOTAL - Add code comment for the race - Undo local variables initializations v3: - Add support for virtual engines based on https://patchwork.freedesktop.org/series/105227/ Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 12 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 65 ++- drivers/gpu/drm/i915/i915_drm_client.c | 6 +- 6 files changed, 89 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4070cb5711d8..4a84146710e0 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend + * already provides the total active time of the context, so skip this + * calculation when this flag is set. 
+ */ + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b7d3214d2cdd..5fc7c19ab29b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -56,7 +56,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -116,7 +116,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. */ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(>pin_count); } @@ -351,7 +351,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, >flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 09f82545789f..797bb4242c18 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -38,6 +38,9 @@ struct intel_context_ops { #define COPS_RUNTIME_CYCLES_BIT 1 #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) +#define COPS_RUNTIME_ACTIVE_TOTAL_BIT 2 +#define COPS_RUNTIME_ACTIVE_TOTAL BIT(COPS_RUNTIME_ACTIVE_TOTAL_BIT) + int (*alloc)(struct intel_context *ce); void (*ban)(struct intel_context *ce, struct i915_request *rq); @@ -55,6 +58,8 @@ struct intel_context_ops {
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On 01/08/2022 20:02, Umesh Nerlige Ramappa wrote: On Wed, Jul 27, 2022 at 09:48:18AM +0100, Tvrtko Ursulin wrote: On 27/07/2022 07:01, Umesh Nerlige Ramappa wrote: On Fri, Jun 17, 2022 at 09:00:06AM +0100, Tvrtko Ursulin wrote: On 16/06/2022 23:13, Nerlige Ramappa, Umesh wrote: [snip - quoted v3 patch, identical to the posting below]
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Wed, Jul 27, 2022 at 09:48:18AM +0100, Tvrtko Ursulin wrote: On 27/07/2022 07:01, Umesh Nerlige Ramappa wrote: On Fri, Jun 17, 2022 at 09:00:06AM +0100, Tvrtko Ursulin wrote: On 16/06/2022 23:13, Nerlige Ramappa, Umesh wrote: [snip - quoted v3 patch, identical to the posting below]
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On 27/07/2022 07:01, Umesh Nerlige Ramappa wrote: On Fri, Jun 17, 2022 at 09:00:06AM +0100, Tvrtko Ursulin wrote: On 16/06/2022 23:13, Nerlige Ramappa, Umesh wrote: [snip - quoted v3 patch, identical to the posting below]
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Fri, Jun 17, 2022 at 09:00:06AM +0100, Tvrtko Ursulin wrote: On 16/06/2022 23:13, Nerlige Ramappa, Umesh wrote: [snip - quoted v3 patch, identical to the posting below]
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On 16/06/2022 23:13, Nerlige Ramappa, Umesh wrote: [snip - quoted v3 patch, identical to the posting below]
[Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. v2: (Tvrtko) - Use COPS_RUNTIME_ACTIVE_TOTAL - Add code comment for the race - Undo local variables initializations v3: - Add support for virtual engines based on https://patchwork.freedesktop.org/series/105227/ Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 12 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 65 ++- drivers/gpu/drm/i915/i915_drm_client.c| 6 +- 6 files changed, 89 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4070cb5711d8..4a84146710e0 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* +* When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend +* already provides the total active time of the context, so skip this +* calculation when this flag is set. +*/ + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b7d3214d2cdd..5fc7c19ab29b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -56,7 +56,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -116,7 +116,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. 
*/ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(>pin_count); } @@ -351,7 +351,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, >flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 09f82545789f..797bb4242c18 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -38,6 +38,9 @@ struct intel_context_ops { #define COPS_RUNTIME_CYCLES_BIT 1 #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) +#define COPS_RUNTIME_ACTIVE_TOTAL_BIT 2 +#define COPS_RUNTIME_ACTIVE_TOTAL BIT(COPS_RUNTIME_ACTIVE_TOTAL_BIT) + int (*alloc)(struct intel_context *ce); void (*ban)(struct intel_context *ce, struct i915_request *rq); @@ -55,6 +58,8 @@ struct intel_context_ops { void (*sched_disable)(struct intel_context *ce); + void (*update_stats)(struct intel_context *ce); + void (*reset)(struct intel_context *ce); void (*destroy)(struct kref *kref); @@ -146,6 +151,7 @@ struct intel_context { struct
[Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. v2: (Tvrtko) - Use COPS_RUNTIME_ACTIVE_TOTAL - Add code comment for the race - Undo local variables initializations Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 12 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 63 ++- drivers/gpu/drm/i915/i915_drm_client.c| 6 +- 6 files changed, 87 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4070cb5711d8..4a84146710e0 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* +* When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend +* already provides the total active time of the context, so skip this +* calculation when this flag is set. +*/ + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b7d3214d2cdd..5fc7c19ab29b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -56,7 +56,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -116,7 +116,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. 
*/ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(>pin_count); } @@ -351,7 +351,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, >flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 09f82545789f..797bb4242c18 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -38,6 +38,9 @@ struct intel_context_ops { #define COPS_RUNTIME_CYCLES_BIT 1 #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) +#define COPS_RUNTIME_ACTIVE_TOTAL_BIT 2 +#define COPS_RUNTIME_ACTIVE_TOTAL BIT(COPS_RUNTIME_ACTIVE_TOTAL_BIT) + int (*alloc)(struct intel_context *ce); void (*ban)(struct intel_context *ce, struct i915_request *rq); @@ -55,6 +58,8 @@ struct intel_context_ops { void (*sched_disable)(struct intel_context *ce); + void (*update_stats)(struct intel_context *ce); + void (*reset)(struct intel_context *ce); void (*destroy)(struct kref *kref); @@ -146,6 +151,7 @@ struct intel_context { struct ewma_runtime avg; u64 total; u32 last; +
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Wed, Jun 15, 2022 at 08:08:40AM +0100, Tvrtko Ursulin wrote: On 14/06/2022 17:32, Umesh Nerlige Ramappa wrote: On Tue, Jun 14, 2022 at 02:30:42PM +0100, Tvrtko Ursulin wrote: On 14/06/2022 01:46, Nerlige Ramappa, Umesh wrote: From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 11 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 3 + drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 55 ++- drivers/gpu/drm/i915/i915_drm_client.c | 6 +- 6 files changed, 75 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4070cb5711d8..a49f313db911 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,23 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* + * GuC backend returns the actual time the context was active, so skip + * the calculation here for GuC. + */ + if (active && !intel_engine_uses_guc(ce->engine)) What is the point of looking at ce->stats.active in GuC mode? I see that guc_context_update_stats/__guc_context_update_clks touches it, but I can't spot that there is a purpose to it. This is the only conditional reading it but it is short-circuited in GuC case. Also, since a GuC only vfunc (update_stats) has been added, I wonder why not just fork the whole runtime query (ce->get_total_runtime_ns). I think that would end up cleaner. active = intel_context_clock() - active; return total + active; In case of GuC the active is used directly here since the active updated in update_stats is equal to the active time of the context already. I will look into separate vfunc. Ah right, I misread something. But yes, I think a separate vfunc will look cleaner. Another option (instead of vfunc) is a similar flag to control the express the flavour of active? Flag does sound simpler. The guc context ops can have something like COPS_RUNTIME_ACTIVE_TOTAL that means total active time. 
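For reference, a minimal illustration of the two flavours of ce->stats.active being discussed (a sketch only: context_busyness_ns() is a made-up helper, not driver code; the formula itself is the one used by the patch, with "now" standing in for intel_context_clock()):

/*
 * execlists: ce->stats.active holds the clock value sampled when the
 *            context last became active, so busyness = total + (now - active).
 * GuC:       update_stats() has already folded (now - switch_in) into
 *            ce->stats.active, so busyness = total + active.
 */
static u64 context_busyness_ns(u64 total_ns, u64 active, u64 now,
			       bool active_is_total)
{
	if (!active)
		return total_ns;

	return total_ns + (active_is_total ? active : now - active);
}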
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b7d3214d2cdd..5fc7c19ab29b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -56,7 +56,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } snip +static void __guc_context_update_clks(struct intel_context *ce) +{ + struct intel_guc *guc = ce_to_guc(ce); + struct intel_gt *gt = ce->engine->gt; + u32 *pphwsp, last_switch, engine_id; + u64 start_gt_clk = 0, active = 0; No need to init these two. + unsigned long flags; + ktime_t unused; + + spin_lock_irqsave(>timestamp.lock, flags); + + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); + + guc_update_pm_timestamp(guc, ); + + if (engine_id != 0x && last_switch) { + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); + __extend_last_switch(guc, _gt_clk, last_switch); + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); + WRITE_ONCE(ce->stats.active, active); + } else { + lrc_update_runtime(ce); Why is this called from here? Presumably it was
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On 14/06/2022 17:32, Umesh Nerlige Ramappa wrote: On Tue, Jun 14, 2022 at 02:30:42PM +0100, Tvrtko Ursulin wrote: On 14/06/2022 01:46, Nerlige Ramappa, Umesh wrote: From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 11 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 3 + drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 55 ++- drivers/gpu/drm/i915/i915_drm_client.c | 6 +- 6 files changed, 75 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4070cb5711d8..a49f313db911 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,23 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* + * GuC backend returns the actual time the context was active, so skip + * the calculation here for GuC. + */ + if (active && !intel_engine_uses_guc(ce->engine)) What is the point of looking at ce->stats.active in GuC mode? I see that guc_context_update_stats/__guc_context_update_clks touches it, but I can't spot that there is a purpose to it. This is the only conditional reading it but it is short-circuited in GuC case. Also, since a GuC only vfunc (update_stats) has been added, I wonder why not just fork the whole runtime query (ce->get_total_runtime_ns). I think that would end up cleaner. active = intel_context_clock() - active; return total + active; In case of GuC the active is used directly here since the active updated in update_stats is equal to the active time of the context already. I will look into separate vfunc. Ah right, I misread something. But yes, I think a separate vfunc will look cleaner. Another option (instead of vfunc) is a similar flag to control the express the flavour of active? 
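To make the flag option above concrete, a hedged sketch of what it might look like on the backend side (the COPS_RUNTIME_ACTIVE_TOTAL name and the update_stats() hook are taken from this patch; the exact placement in guc_context_ops and the body of the hook are assumptions, not the literal driver code):

/*
 * Sketch: a backend whose ->update_stats() leaves an accumulated active
 * time in ce->stats.active advertises that via an ops flag, so the
 * common query skips the "now - start" conversion. Execlists keeps the
 * existing behaviour simply by not setting the flag.
 */
static void guc_context_update_stats(struct intel_context *ce)
{
	/* Refresh ce->stats.runtime.total and ce->stats.active from GuC. */
	__guc_context_update_clks(ce);
}

static const struct intel_context_ops guc_context_ops = {
	.flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL,
	/* ... other ops ... */
	.update_stats = guc_context_update_stats,
};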
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b7d3214d2cdd..5fc7c19ab29b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -56,7 +56,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } snip +static void __guc_context_update_clks(struct intel_context *ce) +{ + struct intel_guc *guc = ce_to_guc(ce); + struct intel_gt *gt = ce->engine->gt; + u32 *pphwsp, last_switch, engine_id; + u64 start_gt_clk = 0, active = 0; No need to init these two. + unsigned long flags; + ktime_t unused; + + spin_lock_irqsave(>timestamp.lock, flags); + + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); + + guc_update_pm_timestamp(guc, ); + + if (engine_id != 0x && last_switch) { + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); + __extend_last_switch(guc, _gt_clk, last_switch); + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); + WRITE_ONCE(ce->stats.active, active); + } else { + lrc_update_runtime(ce); Why is this called from here? Presumably it was called already from guc_context_unpin if here code things context is not active. Or will be called shortly, once context save is done. guc_context_unpin is only called in the path of
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On Tue, Jun 14, 2022 at 02:30:42PM +0100, Tvrtko Ursulin wrote: On 14/06/2022 01:46, Nerlige Ramappa, Umesh wrote: From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 11 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 3 + drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 55 ++- drivers/gpu/drm/i915/i915_drm_client.c| 6 +- 6 files changed, 75 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4070cb5711d8..a49f313db911 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,23 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* +* GuC backend returns the actual time the context was active, so skip +* the calculation here for GuC. +*/ + if (active && !intel_engine_uses_guc(ce->engine)) What is the point of looking at ce->stats.active in GuC mode? I see that guc_context_update_stats/__guc_context_update_clks touches it, but I can't spot that there is a purpose to it. This is the only conditional reading it but it is short-circuited in GuC case. Also, since a GuC only vfunc (update_stats) has been added, I wonder why not just fork the whole runtime query (ce->get_total_runtime_ns). I think that would end up cleaner. active = intel_context_clock() - active; return total + active; In case of GuC the active is used directly here since the active updated in update_stats is equal to the active time of the context already. I will look into separate vfunc. diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b7d3214d2cdd..5fc7c19ab29b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -56,7 +56,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } snip +static void __guc_context_update_clks(struct intel_context *ce) +{ + struct intel_guc *guc = ce_to_guc(ce); + struct intel_gt *gt = ce->engine->gt; + u32 *pphwsp, last_switch, engine_id; + u64 start_gt_clk = 0, active = 0; No need to init these two. 
+ unsigned long flags; + ktime_t unused; + + spin_lock_irqsave(&guc->timestamp.lock, flags); + + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); + + guc_update_pm_timestamp(guc, &unused); + + if (engine_id != 0xffffffff && last_switch) { + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); + __extend_last_switch(guc, &start_gt_clk, last_switch); + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); + WRITE_ONCE(ce->stats.active, active); + } else { + lrc_update_runtime(ce); Why is this called from here? Presumably it was called already from guc_context_unpin if the code here thinks the context is not active. Or will be called shortly, once context save is done. guc_context_unpin is only called in the path of ce->sched_disable. The sched_disable is implemented in GuC (H2G message). Once the corresponding G2H response is
Re: [Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
On 14/06/2022 01:46, Nerlige Ramappa, Umesh wrote: From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 11 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 3 + drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 55 ++- drivers/gpu/drm/i915/i915_drm_client.c| 6 +- 6 files changed, 75 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4070cb5711d8..a49f313db911 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,23 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* +* GuC backend returns the actual time the context was active, so skip +* the calculation here for GuC. +*/ + if (active && !intel_engine_uses_guc(ce->engine)) What is the point of looking at ce->stats.active in GuC mode? I see that guc_context_update_stats/__guc_context_update_clks touches it, but I can't spot that there is a purpose to it. This is the only conditional reading it but it is short-circuited in GuC case. Also, since a GuC only vfunc (update_stats) has been added, I wonder why not just fork the whole runtime query (ce->get_total_runtime_ns). I think that would end up cleaner. active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b7d3214d2cdd..5fc7c19ab29b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -56,7 +56,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -116,7 +116,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. 
*/ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(&ce->pin_count); } @@ -351,7 +351,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, &ce->flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 09f82545789f..0a3290c99a31 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -55,6 +55,8 @@ struct intel_context_ops { void (*sched_disable)(struct intel_context *ce); + void (*update_stats)(struct intel_context *ce); + void (*reset)(struct intel_context *ce); void (*destroy)(struct kref *kref); @@ -146,6 +148,7 @@ struct intel_context { struct ewma_runtime avg; u64 total; u32 last; + u64 start_gt_clk;
[Intel-gfx] [PATCH] i915/pmu: Wire GuC backend to per-client busyness
From: John Harrison GuC provides engine_id and last_switch_in ticks for an active context in the pphwsp. The context image provides a 32 bit total ticks which is the accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This information is used to calculate the context busyness as follows: If the engine_id is valid, then busyness is the sum of accumulated total ticks and active ticks. Active ticks is calculated with current gt time as reference. If engine_id is invalid, busyness is equal to accumulated total ticks. Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a potential race was highlighted in an earlier review that can lead to double accounting of busyness. While the solution to this is a wip, busyness is still usable for platforms running GuC submission. Signed-off-by: John Harrison Signed-off-by: Umesh Nerlige Ramappa --- drivers/gpu/drm/i915/gt/intel_context.c | 11 +++- drivers/gpu/drm/i915/gt/intel_context.h | 6 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 3 + drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 55 ++- drivers/gpu/drm/i915/i915_drm_client.c| 6 +- 6 files changed, 75 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4070cb5711d8..a49f313db911 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,23 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* +* GuC backend returns the actual time the context was active, so skip +* the calculation here for GuC. +*/ + if (active && !intel_engine_uses_guc(ce->engine)) active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b7d3214d2cdd..5fc7c19ab29b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -56,7 +56,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -116,7 +116,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. 
*/ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(>pin_count); } @@ -351,7 +351,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, >flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 09f82545789f..0a3290c99a31 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -55,6 +55,8 @@ struct intel_context_ops { void (*sched_disable)(struct intel_context *ce); + void (*update_stats)(struct intel_context *ce); + void (*reset)(struct intel_context *ce); void (*destroy)(struct kref *kref); @@ -146,6 +148,7 @@ struct intel_context { struct ewma_runtime avg; u64 total; u32 last; + u64 start_gt_clk; I915_SELFTEST_DECLARE(u32 num_underflow); I915_SELFTEST_DECLARE(u32 max_underflow); } runtime; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index b3c9a9327f76..6231ad03e4eb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -196,6 +196,11 @@ static inline u8 guc_class_to_engine_class(u8 guc_class) return guc_class_engine_class_map[guc_class]; } +/* Per context