Add helper functions to make the flattened runqueue patch a little smaller.

The task_se_h_weight function is similar to task_se_h_load, but scales
the task weight by the group weight, without taking the task's duty
cycle into account. The task_se_in_cgroup helper is functionally
identical to parent_entity, but calling a function with that name
directly would obscure what the calling code is using it for, and would
make the code harder to understand.

Signed-off-by: Rik van Riel <r...@surriel.com>
---
 kernel/sched/fair.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a751e7a9b228..6fea8849cc12 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -243,6 +243,7 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
 const struct sched_class fair_sched_class;
 
 static unsigned long task_se_h_load(struct sched_entity *se);
+static unsigned long task_se_h_weight(struct sched_entity *se);
 
 /**************************************************************
  * CFS operations on generic schedulable entities:
@@ -411,6 +412,12 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 	return se->parent;
 }
 
+/* Is this (task) sched_entity in a non-root cgroup? */
+static inline bool task_se_in_cgroup(struct sched_entity *se)
+{
+	return parent_entity(se);
+}
+
 static void
 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 {
@@ -7819,6 +7826,20 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 	}
 }
 
+static unsigned long task_se_h_weight(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq;
+
+	if (!task_se_in_cgroup(se))
+		return se->load.weight;
+
+	cfs_rq = group_cfs_rq_of_parent(se);
+	update_cfs_rq_h_load(cfs_rq);
+
+	/* Reduce the load.weight by the h_load of the group the task is in. */
+	return (cfs_rq->h_load * se->load.weight) >> SCHED_FIXEDPOINT_SHIFT;
+}
+
 static unsigned long task_se_h_load(struct sched_entity *se)
 {
 	struct cfs_rq *cfs_rq = group_cfs_rq_of_parent(se);
@@ -7855,6 +7876,11 @@ static unsigned long task_se_h_load(struct sched_entity *se)
 {
 	return se->avg.load_avg;
 }
+
+static unsigned long task_se_h_weight(struct sched_entity *se)
+{
+	return se->load.weight;
+}
 #endif
 
 /********** Helpers for find_busiest_group ************************/
-- 
2.20.1
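
Not part of the patch, just an aside for readers skimming the arithmetic:
below is a minimal userspace sketch of the fixed-point scaling that
task_se_h_weight() performs. SCHED_FIXEDPOINT_SHIFT is 10 in the kernel;
the task weight and group h_load values are made-up example inputs, not
numbers taken from a real runqueue.

/* Standalone illustration of the task_se_h_weight() scaling step. */
#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT	10

int main(void)
{
	/* Hypothetical inputs: a nice-0 task (weight 1024) in a group
	 * whose hierarchical load works out to 512. */
	unsigned long task_weight = 1024;
	unsigned long group_h_load = 512;

	/* Same arithmetic as the patch:
	 * (cfs_rq->h_load * se->load.weight) >> SCHED_FIXEDPOINT_SHIFT */
	unsigned long h_weight = (group_h_load * task_weight)
					>> SCHED_FIXEDPOINT_SHIFT;

	printf("hierarchical weight: %lu\n", h_weight);	/* prints 512 */
	return 0;
}

With a 10-bit shift, an h_load of 1024 leaves the task's weight unchanged,
and smaller values scale it down proportionally.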