On Tue, Jul 07, 2015 at 07:24:15PM +0100, Morten Rasmussen wrote:
> Let available compute capacity and estimated energy impact select
> wake-up target cpu when energy-aware scheduling is enabled and the
> system is not over-utilized (above the tipping point).
> 
> energy_aware_wake_cpu() attempts to find group of cpus with sufficient
> compute capacity to accommodate the task and find a cpu with enough spare
> capacity to handle the task within that group. Preference is given to
> cpus with enough spare capacity at the current OPP. Finally, the energy
> impact of the new target and the previous task cpu is compared to select
> the wake-up target cpu.
> 
> cc: Ingo Molnar <mi...@redhat.com>
> cc: Peter Zijlstra <pet...@infradead.org>
> 
> Signed-off-by: Morten Rasmussen <morten.rasmus...@arm.com>
> ---
>  kernel/sched/fair.c | 85 
> ++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 84 insertions(+), 1 deletion(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 0f7dbda4..01f7337 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5427,6 +5427,86 @@ static int select_idle_sibling(struct task_struct *p, 
> int target)
>       return target;
>  }
>  
> +static int energy_aware_wake_cpu(struct task_struct *p, int target)
> +{
> +     struct sched_domain *sd;
> +     struct sched_group *sg, *sg_target;
> +     int target_max_cap = INT_MAX;
> +     int target_cpu = task_cpu(p);
> +     int i;
> +
> +     sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));
> +
> +     if (!sd)
> +             return target;
> +
> +     sg = sd->groups;
> +     sg_target = sg;
> +
> +     /*
> +      * Find group with sufficient capacity. We only get here if no cpu is
> +      * overutilized. We may end up overutilizing a cpu by adding the task,
> +      * but that should not be any worse than select_idle_sibling().
> +      * load_balance() should sort it out later as we get above the tipping
> +      * point.
> +      */
> +     do {
> +             /* Assuming all cpus are the same in group */
> +             int max_cap_cpu = group_first_cpu(sg);
> +
> +             /*
> +              * Assume smaller max capacity means more energy-efficient.
> +              * Ideally we should query the energy model for the right
> +              * answer but it easily ends up in an exhaustive search.
> +              */
> +             if (capacity_of(max_cap_cpu) < target_max_cap &&
> +                 task_fits_capacity(p, max_cap_cpu)) {
> +                     sg_target = sg;
> +                     target_max_cap = capacity_of(max_cap_cpu);
> +             }

Shouldn't this also consider the scenario where two groups have the same
capacity? That would benefit the LITTLE.LITTLE case. The code would then
look like below:

        int target_sg_cpu = INT_MAX;

        if (capacity_of(max_cap_cpu) <= target_max_cap &&
            task_fits_capacity(p, max_cap_cpu)) {

                if ((capacity_of(max_cap_cpu) == target_max_cap) &&
                    (target_sg_cpu < max_cap_cpu))
                        continue;

                target_sg_cpu = max_cap_cpu;
                sg_target = sg;
                target_max_cap = capacity_of(max_cap_cpu);
        }

> +     } while (sg = sg->next, sg != sd->groups);
> +
> +     /* Find cpu with sufficient capacity */
> +     for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg_target)) {
> +             /*
> +              * p's blocked utilization is still accounted for on prev_cpu
> +              * so prev_cpu will receive a negative bias due to the
> +              * double accounting. However, the blocked utilization may be zero.
> +              */
> +             int new_usage = get_cpu_usage(i) + task_utilization(p);
> +
> +             if (new_usage > capacity_orig_of(i))
> +                     continue;
> +
> +             if (new_usage < capacity_curr_of(i)) {
> +                     target_cpu = i;
> +                     if (cpu_rq(i)->nr_running)
> +                             break;
> +             }
> +
> +             /* cpu has capacity at higher OPP, keep it as fallback */
> +             if (target_cpu == task_cpu(p))
> +                     target_cpu = i;
> +     }
> +
> +     if (target_cpu != task_cpu(p)) {
> +             struct energy_env eenv = {
> +                     .usage_delta    = task_utilization(p),
> +                     .src_cpu        = task_cpu(p),
> +                     .dst_cpu        = target_cpu,
> +             };
> +
> +             /* Not enough spare capacity on previous cpu */
> +             if (cpu_overutilized(task_cpu(p)))
> +                     return target_cpu;
> +
> +             if (energy_diff(&eenv) >= 0)
> +                     return task_cpu(p);
> +     }
> +
> +     return target_cpu;
> +}
> +
>  /*
>   * select_task_rq_fair: Select target runqueue for the waking task in domains
>   * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
> @@ -5479,7 +5559,10 @@ select_task_rq_fair(struct task_struct *p, int 
> prev_cpu, int sd_flag, int wake_f
>               prev_cpu = cpu;
>  
>       if (sd_flag & SD_BALANCE_WAKE && want_sibling) {
> -             new_cpu = select_idle_sibling(p, prev_cpu);
> +             if (energy_aware() && !cpu_rq(cpu)->rd->overutilized)
> +                     new_cpu = energy_aware_wake_cpu(p, prev_cpu);
> +             else
> +                     new_cpu = select_idle_sibling(p, prev_cpu);
>               goto unlock;
>       }
>  
> -- 
> 1.9.1
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majord...@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to