Hi Alex,

You can add my Reviewed-by to the patch below.

Thanks

Regards
Preeti U Murthy

On 04/04/2013 07:30 AM, Alex Shi wrote:
> The cpu's utilization measures how busy the cpu is:
>         util = cpu_rq(cpu)->avg.runnable_avg_sum * SCHED_POWER_SCALE
>                 / cpu_rq(cpu)->avg.runnable_avg_period;
> 
> Since util is never more than 1, we scale its value by 1024, the same
> as SCHED_POWER_SCALE, and define FULL_UTIL as 1024.
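
For anyone new to the fixed-point convention, here is a minimal
userspace sketch of the math above. The sum/period sample values are
made up; in the kernel the real fields live in struct sched_avg:

#include <stdio.h>

#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)	/* 1024 */
#define FULL_UTIL		SCHED_POWER_SCALE

int main(void)
{
	/* hypothetical samples: cpu runnable about half of the time */
	unsigned int sum = 23871;	/* stands in for runnable_avg_sum */
	unsigned int period = 47742;	/* stands in for runnable_avg_period */

	/* avoid dividing by zero, as the patch does */
	period = period ? period : 1;

	/* util = sum * 1024 / period, done as a shift like the patch */
	unsigned int util = ((unsigned long long)sum << SCHED_POWER_SHIFT)
				/ period;

	printf("util = %u of %ld (FULL_UTIL)\n", util, FULL_UTIL);
	return 0;
}

This prints "util = 512 of 1024", i.e. the cpu is 50% busy.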
> 
> The later power aware scheduling will be sensitive to how busy the
> cpu is, since power consumption is tightly related to cpu busy time.
> 
> BTW, rq->util can be used for any purpose if needed, not only for
> power aware scheduling.
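
As a hypothetical example of such a non-power user (cpu_nearly_full()
is not an existing helper, and the 90% threshold is purely
illustrative):

static inline bool cpu_nearly_full(struct rq *rq)
{
	/* illustrative: treat >= ~90% of FULL_UTIL (921 of 1024) as full */
	return rq->util >= FULL_UTIL * 9 / 10;
}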
> 
> Signed-off-by: Alex Shi <alex....@intel.com>
> ---
>  include/linux/sched.h | 2 +-
>  kernel/sched/debug.c  | 1 +
>  kernel/sched/fair.c   | 5 +++++
>  kernel/sched/sched.h  | 4 ++++
>  4 files changed, 11 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 5a4cf37..226a515 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -793,7 +793,7 @@ enum cpu_idle_type {
>  #define SCHED_LOAD_SCALE     (1L << SCHED_LOAD_SHIFT)
> 
>  /*
> - * Increase resolution of cpu_power calculations
> + * Increase resolution of cpu_power and rq->util calculations
>   */
>  #define SCHED_POWER_SHIFT    10
>  #define SCHED_POWER_SCALE    (1L << SCHED_POWER_SHIFT)
> diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
> index 75024a6..f5db759 100644
> --- a/kernel/sched/debug.c
> +++ b/kernel/sched/debug.c
> @@ -311,6 +311,7 @@ do {                                             \
> 
>       P(ttwu_count);
>       P(ttwu_local);
> +     P(util);
> 
>  #undef P
>  #undef P64
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 2e49c3f..7124244 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1495,8 +1495,13 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
> 
>  static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
>  {
> +     u32 period;
>       __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
>       __update_tg_runnable_avg(&rq->avg, &rq->cfs);
> +
> +     period = rq->avg.runnable_avg_period ? rq->avg.runnable_avg_period : 1;
> +     rq->util = (u64)(rq->avg.runnable_avg_sum << SCHED_POWER_SHIFT)
> +                             / period;
>  }
> 
>  /* Add the load generated by se into cfs_rq's child load-average */
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 804ee41..8682110 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -351,6 +351,9 @@ extern struct root_domain def_root_domain;
> 
>  #endif /* CONFIG_SMP */
> 
> +/* full cpu utilization */
> +#define FULL_UTIL    SCHED_POWER_SCALE
> +
>  /*
>   * This is the main, per-CPU runqueue data structure.
>   *
> @@ -482,6 +485,7 @@ struct rq {
>  #endif
> 
>       struct sched_avg avg;
> +     unsigned int util;
>  };
> 
>  static inline int cpu_of(struct rq *rq)
> 
