On Fri, Mar 02, 2018 at 03:06:21PM -0500, Steven Rostedt wrote:
> On Thu, 1 Mar 2018 12:48:58 -0800
> "Paul E. McKenney" <paul...@linux.vnet.ibm.com> wrote:
> 
> > So how about I rename cond_resched_rcu_qs() to cond_resched_tasks_rcu_qs(),
> > which at least gives a hint as to where it needs to be used?
> > 
> > Would that work for you?
> 
> Yes, definitely!

Like this?

                                                        Thanx, Paul

------------------------------------------------------------------------

commit 4551cfd69a85393f478462fe5e16e42f0fa6391e
Author: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Date:   Fri Mar 2 16:35:27 2018 -0800

    rcu: Rename cond_resched_rcu_qs() to cond_resched_tasks_rcu_qs()
    
    Commit e31d28b6ab8f ("trace: Eliminate cond_resched_rcu_qs() in favor
    of cond_resched()") substituted cond_resched() for the earlier call
    to cond_resched_rcu_qs().  However, the new-age cond_resched() does
    not do anything to help RCU-tasks grace periods because (1) RCU-tasks
    is only enabled when CONFIG_PREEMPT=y and (2) cond_resched() is a
    complete no-op when preemption is enabled.  This situation results
    in hangs when running the trace benchmarks.
    
    A number of potential fixes were discussed on LKML
    (https://lkml.kernel.org/r/20180224151240.0d63a...@vmware.local.home),
    including making cond_resched() not be a no-op; making cond_resched()
    not be a no-op, but only when running tracing benchmarks; reverting
    the aforementioned commit (which works because cond_resched_rcu_qs()
    does provide an RCU-tasks quiescent state); and adding a call to the
    scheduler/RCU rcu_note_voluntary_context_switch() function.  All were
    deemed unsatisfactory, either due to added cond_resched() overhead or
    due to magic functions inviting cargo culting.
    
    This commit renames cond_resched_rcu_qs() to cond_resched_tasks_rcu_qs(),
    which provides a clear hint as to what this function does, why it is
    needed, and where it should be used, and then replaces the call to
    cond_resched() with cond_resched_tasks_rcu_qs() in the trace benchmark's
    benchmark_event_kthread() function.
    
    Reported-by: Steven Rostedt <rost...@goodmis.org>
    Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 36360d07f25b..19d235fefdb9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -188,13 +188,13 @@ static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 /**
- * cond_resched_rcu_qs - Report potential quiescent states to RCU
+ * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
  *
  * This macro resembles cond_resched(), except that it is defined to
  * report potential quiescent states to RCU-tasks even if the cond_resched()
  * machinery were to be shut off, as some advocate for PREEMPT kernels.
  */
-#define cond_resched_rcu_qs() \
+#define cond_resched_tasks_rcu_qs() \
 do { \
        if (!cond_resched()) \
                rcu_note_voluntary_context_switch_lite(current); \
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 777e7a6a0292..e232846516b3 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -369,7 +369,7 @@ static bool __maybe_unused torturing_tasks(void)
  */
 static void rcu_perf_wait_shutdown(void)
 {
-       cond_resched_rcu_qs();
+       cond_resched_tasks_rcu_qs();
        if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
                return;
        while (!torture_must_stop())
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8fde264e24aa..381b47a68ac6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1234,10 +1234,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        }
 
        /*
-        * Has this CPU encountered a cond_resched_rcu_qs() since the
-        * beginning of the grace period?  For this to be the case,
-        * the CPU has to have noticed the current grace period.  This
-        * might not be the case for nohz_full CPUs looping in the kernel.
+        * Has this CPU encountered a cond_resched() since the beginning
+        * of the grace period?  For this to be the case, the CPU has to
+        * have noticed the current grace period.  This might not be the
+        * case for nohz_full CPUs looping in the kernel.
         */
        jtsq = jiffies_till_sched_qs;
        ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
@@ -2049,7 +2049,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
                raw_spin_unlock_irq_rcu_node(rnp);
-               cond_resched_rcu_qs();
+               cond_resched_tasks_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
        }
 
@@ -2152,7 +2152,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                sq = rcu_nocb_gp_get(rnp);
                raw_spin_unlock_irq_rcu_node(rnp);
                rcu_nocb_gp_cleanup(sq);
-               cond_resched_rcu_qs();
+               cond_resched_tasks_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
                rcu_gp_slow(rsp, gp_cleanup_delay);
        }
@@ -2203,7 +2203,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        /* Locking provides needed memory barrier. */
                        if (rcu_gp_init(rsp))
                                break;
-                       cond_resched_rcu_qs();
+                       cond_resched_tasks_rcu_qs();
                        WRITE_ONCE(rsp->gp_activity, jiffies);
                        WARN_ON(signal_pending(current));
                        trace_rcu_grace_period(rsp->name,
@@ -2248,7 +2248,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                trace_rcu_grace_period(rsp->name,
                                                       READ_ONCE(rsp->gpnum),
                                                       TPS("fqsend"));
-                               cond_resched_rcu_qs();
+                               cond_resched_tasks_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                ret = 0; /* Force full wait till next FQS. */
                                j = jiffies_till_next_fqs;
@@ -2261,7 +2261,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                }
                        } else {
                                /* Deal with stray signal. */
-                               cond_resched_rcu_qs();
+                               cond_resched_tasks_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                WARN_ON(signal_pending(current));
                                trace_rcu_grace_period(rsp->name,
@@ -2784,7 +2784,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
        struct rcu_node *rnp;
 
        rcu_for_each_leaf_node(rsp, rnp) {
-               cond_resched_rcu_qs();
+               cond_resched_tasks_rcu_qs();
                mask = 0;
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->qsmask == 0) {
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 3695c12cfcdc..9accacffd138 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1632,7 +1632,7 @@ static int rcu_oom_notify(struct notifier_block *self,
 
        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
-               cond_resched_rcu_qs();
+               cond_resched_tasks_rcu_qs();
        }
 
        /* Unconditionally decrement: no need to wake ourselves up. */
@@ -2261,7 +2261,7 @@ static int rcu_nocb_kthread(void *arg)
                                cl++;
                        c++;
                        local_bh_enable();
-                       cond_resched_rcu_qs();
+                       cond_resched_tasks_rcu_qs();
                        list = next;
                }
                trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 68fa19a5e7bd..e401960c7f51 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
  * grace period has elapsed, in other words after all currently
  * executing rcu-tasks read-side critical sections have elapsed.  These
  * read-side critical sections are delimited by calls to schedule(),
- * cond_resched_rcu_qs(), idle execution, userspace execution, calls
+ * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
  * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
  *
  * This is a very specialized primitive, intended only for a few uses in
diff --git a/kernel/torture.c b/kernel/torture.c
index 37b94012a3f8..3de1efbecd6a 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -574,7 +574,7 @@ void stutter_wait(const char *title)
 {
        int spt;
 
-       cond_resched_rcu_qs();
+       cond_resched_tasks_rcu_qs();
        spt = READ_ONCE(stutter_pause_test);
        for (; spt; spt = READ_ONCE(stutter_pause_test)) {
                if (spt == 1) {
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index 22fee766081b..80e0b2aca703 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -159,13 +159,13 @@ static int benchmark_event_kthread(void *arg)
                 * wants to run, schedule in, but if the CPU is idle,
                 * we'll keep burning cycles.
                 *
-                * Note the _rcu_qs() version of cond_resched() will
+                * Note the tasks_rcu_qs() version of cond_resched() will
                 * notify synchronize_rcu_tasks() that this thread has
                 * passed a quiescent state for rcu_tasks. Otherwise
                 * this thread will never voluntarily schedule which would
                 * block synchronize_rcu_tasks() indefinitely.
                 */
-               cond_resched();
+               cond_resched_tasks_rcu_qs();
        }
 
        return 0;

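For readers following along, here is a minimal sketch (not part of the
patch) of the usage pattern the rename is meant to make obvious, modeled
on the benchmark_event_kthread() change above.  The thread function and
its work loop are hypothetical; cond_resched_tasks_rcu_qs() and the
kthread API are as in the patch.

#include <linux/kthread.h>
#include <linux/rcupdate.h>

/* Hypothetical long-running kthread; mirrors benchmark_event_kthread(). */
static int example_busy_kthread(void *arg)
{
        while (!kthread_should_stop()) {
                do_some_work(); /* hypothetical work that never blocks */

                /*
                 * With CONFIG_PREEMPT=y, a bare cond_resched() is a
                 * no-op, so synchronize_rcu_tasks() could wait on this
                 * thread forever.  cond_resched_tasks_rcu_qs() both
                 * offers to reschedule and reports an RCU-tasks
                 * quiescent state.
                 */
                cond_resched_tasks_rcu_qs();
        }
        return 0;
}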