Commit-ID:  3cd126af79ed5a4d6b06eba63d3349e143a3bd3b
Gitweb:     https://git.kernel.org/tip/3cd126af79ed5a4d6b06eba63d3349e143a3bd3b
Author:     Peter Zijlstra <pet...@infradead.org>
AuthorDate: Mon, 7 Jan 2019 13:52:31 +0100
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Mon, 21 Jan 2019 14:40:28 +0100

sched/fair: Robustify CFS-bandwidth timer locking

Traditionally, hrtimer callbacks were run with IRQs disabled, but with
the introduction of HRTIMER_MODE_SOFT it is possible for them to run
from SoftIRQ context, which does _NOT_ have IRQs disabled.
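
As a minimal illustration (the timer and callback names here are
hypothetical; hrtimer_init(), hrtimer_start() and the mode constants
are the actual kernel API), a soft-mode timer looks like this:

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer my_timer;	/* hypothetical example timer */

	static enum hrtimer_restart my_cb(struct hrtimer *t)
	{
		/*
		 * With HRTIMER_MODE_REL_SOFT this callback runs from
		 * SoftIRQ context with IRQs enabled, so any lock that is
		 * also taken with IRQs disabled must use the
		 * _irqsave/_irqrestore variants.
		 */
		return HRTIMER_NORESTART;
	}

	static void my_setup(void)
	{
		hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
		my_timer.function = my_cb;
		hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
	}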

Allow the CFS bandwidth timers (period_timer and slack_timer) to be
run from SoftIRQ context; this entails removing the locking's
assumption that IRQs are already disabled.
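
Sketched below, with hypothetical helper names (not the actual fair.c
code), is the pattern the patch follows: take the lock with
raw_spin_lock_irqsave() and hand the saved flags down to any callee
that must drop and retake the lock, as do_sched_cfs_period_timer()
now does:

	#include <linux/spinlock.h>

	/* Hypothetical sketch of the conversion. */
	static void drop_and_retake(raw_spinlock_t *lock, unsigned long flags)
	{
		/* Restores the caller's saved IRQ state while the lock is dropped. */
		raw_spin_unlock_irqrestore(lock, flags);
		/* ... work that must not nest under the lock ... */
		raw_spin_lock_irqsave(lock, flags);
	}

	static void timer_body(raw_spinlock_t *lock)
	{
		unsigned long flags;

		/* Correct whether IRQs are enabled (SoftIRQ) or disabled (hard IRQ). */
		raw_spin_lock_irqsave(lock, flags);
		drop_and_retake(lock, flags);
		raw_spin_unlock_irqrestore(lock, flags);
	}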

While mainline doesn't strictly need this, -RT forces all timers not
explicitly marked with MODE_HARD into MODE_SOFT and trips over this.
Marking these timers as MODE_HARD doesn't make sense, as they're not
required for RT operation and can potentially be quite expensive.

Reported-by: Tom Putzeys <tom.putz...@be.atlascopco.com>
Tested-by: Mike Galbraith <efa...@gmx.de>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Sebastian Andrzej Siewior <bige...@linutronix.de>
Cc: Thomas Gleixner <t...@linutronix.de>
Link: https://lkml.kernel.org/r/20190107125231.ge14...@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 kernel/sched/fair.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b1374fbddd0d..3b61e19b504a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4565,7 +4565,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
                struct rq *rq = rq_of(cfs_rq);
                struct rq_flags rf;
 
-               rq_lock(rq, &rf);
+               rq_lock_irqsave(rq, &rf);
                if (!cfs_rq_throttled(cfs_rq))
                        goto next;
 
@@ -4582,7 +4582,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
                        unthrottle_cfs_rq(cfs_rq);
 
 next:
-               rq_unlock(rq, &rf);
+               rq_unlock_irqrestore(rq, &rf);
 
                if (!remaining)
                        break;
@@ -4598,7 +4598,7 @@ next:
  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
  * used to track this state.
  */
-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
 {
        u64 runtime, runtime_expires;
        int throttled;
@@ -4640,11 +4640,11 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
        while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
                runtime = cfs_b->runtime;
                cfs_b->distribute_running = 1;
-               raw_spin_unlock(&cfs_b->lock);
+               raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
                /* we can't nest cfs_b->lock while distributing bandwidth */
                runtime = distribute_cfs_runtime(cfs_b, runtime,
                                                 runtime_expires);
-               raw_spin_lock(&cfs_b->lock);
+               raw_spin_lock_irqsave(&cfs_b->lock, flags);
 
                cfs_b->distribute_running = 0;
                throttled = !list_empty(&cfs_b->throttled_cfs_rq);
@@ -4753,17 +4753,18 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 {
        u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
+       unsigned long flags;
        u64 expires;
 
        /* confirm we're still not at a refresh boundary */
-       raw_spin_lock(&cfs_b->lock);
+       raw_spin_lock_irqsave(&cfs_b->lock, flags);
        if (cfs_b->distribute_running) {
-               raw_spin_unlock(&cfs_b->lock);
+               raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
                return;
        }
 
        if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
-               raw_spin_unlock(&cfs_b->lock);
+               raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
                return;
        }
 
@@ -4774,18 +4775,18 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
        if (runtime)
                cfs_b->distribute_running = 1;
 
-       raw_spin_unlock(&cfs_b->lock);
+       raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 
        if (!runtime)
                return;
 
        runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
 
-       raw_spin_lock(&cfs_b->lock);
+       raw_spin_lock_irqsave(&cfs_b->lock, flags);
        if (expires == cfs_b->runtime_expires)
                lsub_positive(&cfs_b->runtime, runtime);
        cfs_b->distribute_running = 0;
-       raw_spin_unlock(&cfs_b->lock);
+       raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 }
 
 /*
@@ -4863,20 +4864,21 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
        struct cfs_bandwidth *cfs_b =
                container_of(timer, struct cfs_bandwidth, period_timer);
+       unsigned long flags;
        int overrun;
        int idle = 0;
 
-       raw_spin_lock(&cfs_b->lock);
+       raw_spin_lock_irqsave(&cfs_b->lock, flags);
        for (;;) {
                overrun = hrtimer_forward_now(timer, cfs_b->period);
                if (!overrun)
                        break;
 
-               idle = do_sched_cfs_period_timer(cfs_b, overrun);
+               idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
        }
        if (idle)
                cfs_b->period_active = 0;
-       raw_spin_unlock(&cfs_b->lock);
+       raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 }
