Yuyang Du <yuyang...@intel.com> writes:

> __compute_runnable_contrib() uses a loop to compute the sum, whereas a
> table lookup can do it faster, in constant time.
>
> The program to generate the constants is located at:
> Documentation/scheduler/sched-avg.txt
>
> Signed-off-by: Yuyang Du <yuyang...@intel.com>
> Reviewed-by: Morten Rasmussen <morten.rasmus...@arm.com>
> Acked-by: Vincent Guittot <vincent.guit...@linaro.org>
> ---
>  kernel/sched/fair.c |   20 ++++++++++++--------
>  1 file changed, 12 insertions(+), 8 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index b8cc1c3..6e0eec0 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -2603,6 +2603,15 @@ static const u32 runnable_avg_yN_sum[] = {
>  };
>  
>  /*
> + * Precomputed \Sum y^k (1 <= k <= n, where n % 32 == 0). Values are
> + * rounded down to the nearest integer.
> + */
> +static const u32 __accumulated_sum_N32[] = {
> +         0, 23371, 35056, 40899, 43820, 45281,
> +     46011, 46376, 46559, 46650, 46696, 46719,
> +};
> +
> +/*
>   * Approximate:
>   *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
>   */
> @@ -2650,14 +2659,9 @@ static u32 __compute_runnable_contrib(u64 n)
>       else if (unlikely(n >= LOAD_AVG_MAX_N))
>               return LOAD_AVG_MAX;
>  
> -     /* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
> -     do {
> -             contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
> -             contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
> -
> -             n -= LOAD_AVG_PERIOD;
> -     } while (n > LOAD_AVG_PERIOD);
> -
> +     /* Since n < LOAD_AVG_MAX_N, n/LOAD_AVG_PERIOD < 11 */
> +     contrib = __accumulated_sum_N32[n>>5]; /* =n/LOAD_AVG_PERIOD */

Just write n / LOAD_AVG_PERIOD; n is unsigned, so the compiler emits the
same shift, and then the comment becomes unnecessary.

> +     n %= LOAD_AVG_PERIOD;
>       contrib = decay_load(contrib, n);
>       return contrib + runnable_avg_yN_sum[n];
>  }
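
For reference, the table can be reproduced by unrolling the old loop:
each extra LOAD_AVG_PERIOD halves the accumulated sum (y^32 = 1/2) and
adds one full period's worth. Below is a minimal standalone sketch,
assuming only runnable_avg_yN_sum[LOAD_AVG_PERIOD] == 23371 (visible in
the existing table); it is not the actual generator from
Documentation/scheduler/sched-avg.txt:

#include <stdio.h>

/* runnable_avg_yN_sum[LOAD_AVG_PERIOD]: \Sum y^k for 1 <= k <= 32 */
#define PERIOD_SUM	23371

int main(void)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < 12; i++) {
		printf("%6u,%c", sum, i % 6 == 5 ? '\n' : ' ');
		/* y^32 = 1/2: decay the old sum, then add a full period */
		sum = sum / 2 + PERIOD_SUM;
	}
	return 0;
}

To check the fast path against the old loop, take n = 100: n >> 5 = 3
picks up the first 96 periods (__accumulated_sum_N32[3] = 40899, i.e.
\Sum y^k for 1 <= k <= 96), decay_load() ages that by the remaining
n %= 32 = 4 periods, and runnable_avg_yN_sum[4] adds the 4 freshest
periods, giving \Sum y^k for 1 <= k <= 100 as before.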
