Unconditionally lock rdp->nocb_lock on nocb code that is called after
we verified that the rdp is offloaded:

This clarifies the locking rules and expectations.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
---
 kernel/rcu/tree_plugin.h | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 523570469864..1d22b16c03e0 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1628,11 +1628,11 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force,
        if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                    TPS("AlreadyAwake"));
-               rcu_nocb_unlock_irqrestore(rdp, flags);
+               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                return;
        }
        del_timer(&rdp->nocb_timer);
-       rcu_nocb_unlock_irqrestore(rdp, flags);
+       raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
        raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
        if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
                WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
@@ -1753,7 +1753,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 
        // Don't use ->nocb_bypass during early boot.
        if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
-               rcu_nocb_lock(rdp);
+               raw_spin_lock(&rdp->nocb_lock);
                WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
                *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
                return false;
@@ -1778,7 +1778,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
        // this jiffy, tell the caller to enqueue onto ->cblist.  But flush
        // ->nocb_bypass first.
        if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
-               rcu_nocb_lock(rdp);
+               raw_spin_lock(&rdp->nocb_lock);
                *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
                if (*was_alldone)
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
@@ -1792,7 +1792,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
        // flush ->nocb_bypass to ->cblist.
        if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
            ncbs >= qhimark) {
-               rcu_nocb_lock(rdp);
+               raw_spin_lock(&rdp->nocb_lock);
                if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
                        *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
                        if (*was_alldone)
@@ -1807,7 +1807,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
                        rcu_advance_cbs_nowake(rdp->mynode, rdp);
                        rdp->nocb_gp_adv_time = j;
                }
-               rcu_nocb_unlock_irqrestore(rdp, flags);
+               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                return true; // Callback already enqueued.
        }
 
@@ -1827,7 +1827,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
                local_irq_restore(flags);
        } else {
                // No-CBs GP kthread might be indefinitely asleep, if so, wake.
-               rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
+               raw_spin_lock(&rdp->nocb_lock); // Rare during call_rcu() flood.
                if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("FirstBQwake"));
@@ -1835,7 +1835,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
                } else {
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("FirstBQnoWake"));
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                }
        }
        return true; // Callback already enqueued.
@@ -1861,7 +1861,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
        if (rcu_nocb_poll || !t) {
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                    TPS("WakeNotPoll"));
-               rcu_nocb_unlock_irqrestore(rdp, flags);
+               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                return;
        }
        // Need to actually to a wakeup.
@@ -1876,7 +1876,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
                } else {
                        wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
                                           TPS("WakeEmptyIsDeferred"));
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                }
        } else if (len > rdp->qlen_last_fqs_check + qhimark) {
                /* ... or if many callbacks queued. */
@@ -1894,10 +1894,10 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
                    !timer_pending(&rdp->nocb_bypass_timer))
                        wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
                                           TPS("WakeOvfIsDeferred"));
-               rcu_nocb_unlock_irqrestore(rdp, flags);
+               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
        } else {
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
-               rcu_nocb_unlock_irqrestore(rdp, flags);
+               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
        }
        return;
 }
-- 
2.25.0

Reply via email to