All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock
either with spin_lock_irq() or spin_lock_irqsave().
cgroup_rstat_flush_locked() itself acquires cgroup_rstat_cpu_lock, which
is a raw_spinlock_t. This lock is also acquired in cgroup_rstat_updated()
in IRQ context and therefore requires the _irqsave() locking suffix in
cgroup_rstat_flush_locked().
Since there is no difference between spinlock_t and raw_spinlock_t on
!RT, lockdep does not complain here. On RT, lockdep complains because
interrupts are not disabled here and a deadlock is possible.

Acquire the raw_spinlock_t with interrupts disabled.
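
For illustration only (not part of the patch): a minimal userspace
analogy of the deadlock, with a POSIX signal standing in for a hardware
interrupt. All names here (cpu_lock, flush_locked, irq_handler) are
hypothetical; blocking the signal around the critical section plays the
role of the _irqsave()/_irqrestore() pair:

  #include <pthread.h>
  #include <signal.h>
  #include <stdio.h>
  #include <time.h>
  #include <unistd.h>

  /* stands in for the per-CPU cgroup_rstat_cpu_lock */
  static pthread_spinlock_t cpu_lock;

  /* "IRQ context": like cgroup_rstat_updated(), takes the lock from a handler */
  static void irq_handler(int sig)
  {
          (void)sig;
          pthread_spin_lock(&cpu_lock);   /* spins forever if it preempted a holder */
          pthread_spin_unlock(&cpu_lock);
  }

  /* the _irqsave pattern: mask the "interrupt" before taking the shared lock */
  static void flush_locked(void)
  {
          sigset_t set, old;

          sigemptyset(&set);
          sigaddset(&set, SIGALRM);
          pthread_sigmask(SIG_BLOCK, &set, &old);    /* ~ raw_spin_lock_irqsave() */
          pthread_spin_lock(&cpu_lock);
          /* ... flush per-CPU stats ... */
          pthread_spin_unlock(&cpu_lock);
          pthread_sigmask(SIG_SETMASK, &old, NULL);  /* ~ raw_spin_unlock_irqrestore() */
  }

  int main(void)
  {
          time_t end = time(NULL) + 2;

          pthread_spin_init(&cpu_lock, PTHREAD_PROCESS_PRIVATE);
          signal(SIGALRM, irq_handler);
          alarm(1);    /* deliver one "interrupt" while the loop runs */
          while (time(NULL) < end)
                  flush_locked();
          puts("no deadlock: the handler never interrupted a held lock");
          return 0;
  }

Dropping the two pthread_sigmask() calls reproduces the failure mode:
the handler can interrupt flush_locked() while cpu_lock is held and then
spin forever on a lock owned by the thread it interrupted, the same
shape as cgroup_rstat_updated() taking the lock from IRQ context.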

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
 kernel/cgroup/rstat.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index d503d1a9007c..63fc5e472c82 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -157,8 +157,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
                raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
                                                       cpu);
                struct cgroup *pos = NULL;
+               unsigned long flags;
 
-               raw_spin_lock(cpu_lock);
+               raw_spin_lock_irqsave(cpu_lock, flags);
                while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
                        struct cgroup_subsys_state *css;
 
@@ -170,7 +171,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
                                css->ss->css_rstat_flush(css, cpu);
                        rcu_read_unlock();
                }
-               raw_spin_unlock(cpu_lock);
+               raw_spin_unlock_irqrestore(cpu_lock, flags);
 
                /* if @may_sleep, play nice and yield if necessary */
                if (may_sleep && (need_resched() ||
-- 
2.18.0