A plain local_bh_disable() is documented as creating an RCU read-side
critical section, and (at least) rcutorture expects this to be the
case.  However, on PREEMPT_RT a BH-disabled region does not block a
grace period, since RCU checks preempt_count() directly rather than
in_softirq().  Even if RCU were changed to check in_softirq(), that
wouldn't allow blocked BH disablers to be priority-boosted.
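
For illustration, the kind of reader that relies on this guarantee
looks roughly like the sketch below; gp and do_something() are
placeholders, not code taken from this patch:

  struct foo *p;

  local_bh_disable();
  p = rcu_dereference(gp);
  if (p)
          do_something(p);
  /*
   * On PREEMPT_RT, without this patch, nothing above holds off a
   * grace period, so p may be freed while it is still in use.
   */
  local_bh_enable();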

Fix this by having local_bh_disable() call rcu_read_lock() (and
local_bh_enable() call rcu_read_unlock()), and update
rcu_read_lock_bh_held() accordingly.

Signed-off-by: Scott Wood <sw...@redhat.com>
---
 include/linux/rcupdate.h |  4 ++++
 kernel/rcu/update.c      |  4 ++++
 kernel/softirq.c         | 12 +++++++++---
 3 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index fb267bc04fdf..aca4e5e25ace 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -637,10 +637,12 @@ static inline void rcu_read_unlock(void)
 static inline void rcu_read_lock_bh(void)
 {
        local_bh_disable();
+#ifndef CONFIG_PREEMPT_RT_FULL
        __acquire(RCU_BH);
        rcu_lock_acquire(&rcu_bh_lock_map);
        RCU_LOCKDEP_WARN(!rcu_is_watching(),
                         "rcu_read_lock_bh() used illegally while idle");
+#endif
 }
 
 /*
@@ -650,10 +652,12 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
+#ifndef CONFIG_PREEMPT_RT_FULL
        RCU_LOCKDEP_WARN(!rcu_is_watching(),
                         "rcu_read_unlock_bh() used illegally while idle");
        rcu_lock_release(&rcu_bh_lock_map);
        __release(RCU_BH);
+#endif
        local_bh_enable();
 }
 
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 3700b730ea55..eb653a329e0b 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -307,7 +307,11 @@ int rcu_read_lock_bh_held(void)
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
+#ifdef CONFIG_PREEMPT_RT_FULL
+       return lock_is_held(&rcu_lock_map) || irqs_disabled();
+#else
        return in_softirq() || irqs_disabled();
+#endif
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 473369122ddd..eb46dd3ff92d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -121,8 +121,10 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
        long soft_cnt;
 
        WARN_ON_ONCE(in_irq());
-       if (!in_atomic())
+       if (!in_atomic()) {
                local_lock(bh_lock);
+               rcu_read_lock();
+       }
        soft_cnt = this_cpu_inc_return(softirq_counter);
        WARN_ON_ONCE(soft_cnt == 0);
 
@@ -155,8 +157,10 @@ void _local_bh_enable(void)
        local_irq_restore(flags);
 #endif
 
-       if (!in_atomic())
+       if (!in_atomic()) {
+               rcu_read_unlock();
                local_unlock(bh_lock);
+       }
 }
 
 void _local_bh_enable_rt(void)
@@ -189,8 +193,10 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
        WARN_ON_ONCE(count < 0);
        local_irq_enable();
 
-       if (!in_atomic())
+       if (!in_atomic()) {
+               rcu_read_unlock();
                local_unlock(bh_lock);
+       }
 
        preempt_check_resched();
 }
-- 
1.8.3.1
