The functions check_preempt_tick() and entity_tick() are executed by
the leader of the group. As such, we already hold the lock for the
per CPU runqueue. Thus, we can use the quick path to resched_curr().
Also, hrtimers are only used/active on per-CPU runqueues. So, use that.

The function __account_cfs_rq_runtime() is called via the enqueue
path, where we don't necessarily hold the per-CPU runqueue lock.
Take the long route through resched_curr().

The function list_add_leaf_cfs_rq() manages a supposedly depth-ordered
list of CFS runqueues that contribute to the load on a certain
runqueue. This is used during load balancing. We keep these lists per
hierarchy level, which corresponds to the lock we hold and also
keeps the per-CPU logic compatible to what is there.

Signed-off-by: Jan H. Schönherr <jscho...@amazon.de>
---
 kernel/sched/fair.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f55954e7cedc..fff88694560c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -342,7 +342,7 @@ static inline struct cfs_rq *parent_cfs_rq(struct cfs_rq *cfs_rq)
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
        if (!cfs_rq->on_list) {
-               struct rq *rq = rq_of(cfs_rq);
+               struct rq *rq = hrq_of(cfs_rq);
                struct cfs_rq *pcfs_rq = parent_cfs_rq(cfs_rq);
                /*
                 * Ensure we either appear before our parent (if already
@@ -4072,7 +4072,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime) {
-               resched_curr(rq_of(cfs_rq));
+               resched_curr(cpu_rq_of(cfs_rq));
                /*
                 * The current task ran long enough, ensure it doesn't get
                 * re-elected due to buddy favours.
@@ -4096,7 +4096,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                return;
 
        if (delta > ideal_runtime)
-               resched_curr(rq_of(cfs_rq));
+               resched_curr(cpu_rq_of(cfs_rq));
 }
 
 static void
@@ -4238,14 +4238,14 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         * validating it and just reschedule.
         */
        if (queued) {
-               resched_curr(rq_of(cfs_rq));
+               resched_curr(cpu_rq_of(cfs_rq));
                return;
        }
        /*
         * don't let the period tick interfere with the hrtick preemption
         */
        if (!sched_feat(DOUBLE_TICK) &&
-                       hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
+                       hrtimer_active(&cpu_rq_of(cfs_rq)->hrtick_timer))
                return;
 #endif
 
@@ -4422,7 +4422,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
         * hierarchy can be throttled
         */
        if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-               resched_curr(rq_of(cfs_rq));
+               resched_curr(hrq_of(cfs_rq));
 }
 
 static __always_inline
-- 
2.9.3.1.gcba166c.dirty

Reply via email to