On Thu, 22 Aug 2019 at 04:18, Rik van Riel <r...@surriel.com> wrote:
>
> Use an explicit "cfs_rq of parent sched_entity" helper in a few
> strategic places, where cfs_rq_of(se) may no longer point at the
> right runqueue once we flatten the hierarchical cgroup runqueues.

The only case is the sched_entity of a task, which will then point to the
root cfs_rq, isn't it?
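
To make sure I read the helper the same way, here is a toy user-space
model (not kernel code; the "flattened" layout below, with a task's
se->cfs_rq pointing at the root cfs_rq while se->parent still points at
the group se, is my assumption about what the series ends up doing):

    /* toy model of cfs_rq_of() vs group_cfs_rq_of_parent() */
    #include <stdio.h>

    struct cfs_rq { const char *name; };

    struct sched_entity {
            struct sched_entity *parent;  /* group se above us, or NULL */
            struct cfs_rq *cfs_rq;        /* runqueue we are queued on */
            struct cfs_rq *my_q;          /* runqueue we own (group se only) */
    };

    static struct cfs_rq *cfs_rq_of(struct sched_entity *se)
    {
            return se->cfs_rq;
    }

    static struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
    {
            return grp->my_q;
    }

    static struct cfs_rq *group_cfs_rq_of_parent(struct sched_entity *se)
    {
            if (se->parent)
                    return group_cfs_rq(se->parent);
            return cfs_rq_of(se);
    }

    int main(void)
    {
            struct cfs_rq root = { "root cfs_rq" };
            struct cfs_rq grp_q = { "cgroup cfs_rq" };
            /* top-level group se: owns grp_q, queued on the root cfs_rq */
            struct sched_entity grp = { .parent = NULL, .cfs_rq = &root, .my_q = &grp_q };
            /* task se after flattening (assumed): queued on root, parent still grp */
            struct sched_entity task = { .parent = &grp, .cfs_rq = &root, .my_q = NULL };

            printf("task: cfs_rq_of=%s, group_cfs_rq_of_parent=%s\n",
                   cfs_rq_of(&task)->name, group_cfs_rq_of_parent(&task)->name);
            printf("grp:  cfs_rq_of=%s, group_cfs_rq_of_parent=%s\n",
                   cfs_rq_of(&grp)->name, group_cfs_rq_of_parent(&grp)->name);
            return 0;
    }

With that layout the two helpers only disagree for the task se (root
cfs_rq vs the cgroup's cfs_rq), which is what prompts the question above.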
>
> No functional change.
>
> Signed-off-by: Rik van Riel <r...@surriel.com>
> ---
>  kernel/sched/fair.c | 17 +++++++++++++----
>  1 file changed, 13 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 04b216234265..31a26737a873 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -276,6 +276,15 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
>         return grp->my_q;
>  }
>
> +/* runqueue owned by the parent entity; the root cfs_rq for a top level se */
> +static inline struct cfs_rq *group_cfs_rq_of_parent(struct sched_entity *se)
> +{
> +       if (se->parent)
> +               return group_cfs_rq(se->parent);
> +
> +       return cfs_rq_of(se);
> +}
> +
>  static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
>  {
>         struct rq *rq = rq_of(cfs_rq);
> @@ -3319,7 +3328,7 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
>
>         gcfs_rq->propagate = 0;
>
> -       cfs_rq = cfs_rq_of(se);
> +       cfs_rq = group_cfs_rq_of_parent(se);
>
>         add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
>
> @@ -7796,7 +7805,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
>
>         WRITE_ONCE(cfs_rq->h_load_next, NULL);
>         for_each_sched_entity(se) {
> -               cfs_rq = cfs_rq_of(se);
> +               cfs_rq = group_cfs_rq_of_parent(se);
>                 WRITE_ONCE(cfs_rq->h_load_next, se);
>                 if (cfs_rq->last_h_load_update == now)
>                         break;
> @@ -7819,7 +7828,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
>
>  static unsigned long task_se_h_load(struct sched_entity *se)
>  {
> -       struct cfs_rq *cfs_rq = cfs_rq_of(se);
> +       struct cfs_rq *cfs_rq = group_cfs_rq_of_parent(se);
>
>         update_cfs_rq_h_load(cfs_rq);
>         return div64_ul(se->avg.load_avg * cfs_rq->h_load,
> @@ -10166,7 +10175,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
>         struct sched_entity *se = &curr->se;
>
>         for_each_sched_entity(se) {
> -               cfs_rq = cfs_rq_of(se);
> +               cfs_rq = group_cfs_rq_of_parent(se);
>                 entity_tick(cfs_rq, se, queued);
>         }
>
> --
> 2.20.1
>