> @@ -3120,6 +3124,7 @@ static int wake_affine(struct sched_domain *sd, struct 
> task_struct *p, int sync)
>       struct task_group *tg;
>       unsigned long weight;
>       int balanced;
> +     int runnable_avg;
>  
>       idx       = sd->wake_idx;
>       this_cpu  = smp_processor_id();
> @@ -3135,13 +3140,19 @@ static int wake_affine(struct sched_domain *sd, 
> struct task_struct *p, int sync)
>       if (sync) {
>               tg = task_group(current);
>               weight = current->se.load.weight;
> +             runnable_avg = current->se.avg.runnable_avg_sum * NICE_0_LOAD
> +                             / (current->se.avg.runnable_avg_period + 1);
>  
> -             this_load += effective_load(tg, this_cpu, -weight, -weight);
> -             load += effective_load(tg, prev_cpu, 0, -weight);
> +             this_load += effective_load(tg, this_cpu, -weight, -weight)
> +                             * runnable_avg >> NICE_0_SHIFT;
> +             load += effective_load(tg, prev_cpu, 0, -weight)
> +                             * runnable_avg >> NICE_0_SHIFT;
>       }


I'm fairly sure this is wrong, but I haven't bothered to put pencil to paper.

I think you'll need to insert the runnable-average load and make sure
effective_load() uses the right sums itself.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to