From: "Paul E. McKenney" <paul...@kernel.org>

A kernel built with CONFIG_RCU_STRICT_GRACE_PERIOD=y needs a quiescent
state to appear very shortly after a CPU has noticed a new grace period.
Placing an RCU reader immediately after this point is ineffective because
this normally happens in softirq context, which acts as a big RCU reader.
This commit therefore introduces a new per-CPU work_struct, which is
used at the end of rcu_core() processing to schedule an RCU read-side
critical section from within a clean environment.

Reported-by: Jann Horn <ja...@google.com>
Signed-off-by: Paul E. McKenney <paul...@kernel.org>
---
 kernel/rcu/tree.c | 13 +++++++++++++
 kernel/rcu/tree.h |  1 +
 2 files changed, 14 insertions(+)

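Not part of the patch, but for reviewers who want to see the per-CPU
work_struct pattern in isolation: below is a minimal, self-contained
module sketch of the same idea, queuing an empty rcu_read_lock()/
rcu_read_unlock() pair into workqueue (process) context on each CPU,
where it is a real RCU reader rather than being absorbed into the
softirq context's implicit read-side critical section.  All names here
(demo_reader_work, demo_reader_handler, and so on) are illustrative
only, and it uses system_wq via schedule_work_on() rather than
rcu_gp_wq.

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct work_struct, demo_reader_work);

/* Runs in process context on the CPU it was queued on. */
static void demo_reader_handler(struct work_struct *work)
{
	/* Empty read-side critical section in a clean (non-softirq) context. */
	rcu_read_lock();
	rcu_read_unlock();
}

static int __init demo_init(void)
{
	int cpu;

	/*
	 * Initialize every possible CPU's work so that flush_work() in the
	 * exit path is always safe, then queue on the currently online CPUs.
	 */
	for_each_possible_cpu(cpu)
		INIT_WORK(per_cpu_ptr(&demo_reader_work, cpu), demo_reader_handler);

	cpus_read_lock();
	for_each_online_cpu(cpu)
		schedule_work_on(cpu, per_cpu_ptr(&demo_reader_work, cpu));
	cpus_read_unlock();

	return 0;
}

static void __exit demo_exit(void)
{
	int cpu;

	/* flush_work() on an initialized but never-queued work is a no-op. */
	for_each_possible_cpu(cpu)
		flush_work(per_cpu_ptr(&demo_reader_work, cpu));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
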
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index dd7af40..ac37343 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2634,6 +2634,14 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
+// Workqueue handler for an RCU reader for kernels enforcing strict RCU
+// grace periods.
+static void strict_work_handler(struct work_struct *work)
+{
+       rcu_read_lock();
+       rcu_read_unlock();
+}
+
 /* Perform RCU core processing work for the current CPU.  */
 static __latent_entropy void rcu_core(void)
 {
@@ -2678,6 +2686,10 @@ static __latent_entropy void rcu_core(void)
        /* Do any needed deferred wakeups of rcuo kthreads. */
        do_nocb_deferred_wakeup(rdp);
        trace_rcu_utilization(TPS("End RCU core"));
+
+       // If strict GPs, schedule an RCU reader in a clean environment.
+       if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+               queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
 }
 
 static void rcu_core_si(struct softirq_action *h)
@@ -3874,6 +3886,7 @@ rcu_boot_init_percpu_data(int cpu)
 
        /* Set up local state, ensuring consistent view of global state. */
        rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
+       INIT_WORK(&rdp->strict_work, strict_work_handler);
        WARN_ON_ONCE(rdp->dynticks_nesting != 1);
        WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
        rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 309bc7f..e4f66b8 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -165,6 +165,7 @@ struct rcu_data {
                                        /* period it is aware of. */
        struct irq_work defer_qs_iw;    /* Obtain later scheduler attention. */
        bool defer_qs_iw_pending;       /* Scheduler attention pending? */
+       struct work_struct strict_work; /* Schedule readers for strict GPs. */
 
        /* 2) batch handling */
        struct rcu_segcblist cblist;    /* Segmented callback list, with */
-- 
2.9.5
