On Sun, Jan 15, 2017 at 11:47:35PM -0800, Josh Triplett wrote:
> On Sat, Jan 14, 2017 at 12:54:42AM -0800, Paul E. McKenney wrote:
> > This commit is the third step towards full abstraction of all accesses
> > to the ->dynticks counter, implementing the previously open-coded atomic
> > add of 1 and entry checks in a new rcu_dynticks_eqs_enter() function, and
> > the same but with exit checks in a new rcu_dynticks_eqs_exit() function.
> > This abstraction will ease changes to the ->dynticks counter operation.
> > 
> > Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
> 
> A couple of comments below.  With those addressed:
> Reviewed-by: Josh Triplett <j...@joshtriplett.org>
> 
> >  kernel/rcu/tree.c | 92 +++++++++++++++++++++++++++++++++++++++----------------
> >  1 file changed, 66 insertions(+), 26 deletions(-)
> > 
> > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > index 805d55ee0b2a..fc49e008963a 100644
> > --- a/kernel/rcu/tree.c
> > +++ b/kernel/rcu/tree.c
> > @@ -282,6 +282,65 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
> >  };
> >  
> >  /*
> > + * Record entry into an extended quiescent state.  This is only to be
> > + * called when not already in an extended quiescent state.
> > + */
> > +static void rcu_dynticks_eqs_enter(void)
> > +{
> > +   struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
> > +
> > +   /*
> > +    * CPUs seeing atomic_inc() must see prior RCU read-side critical
> > +    * sections, and we also must force ordering with the next idle
> > +    * sojourn.
> > +    */
> > +   smp_mb__before_atomic(); /* See above. */
> > +   atomic_inc(&rdtp->dynticks);
> > +   smp_mb__after_atomic(); /* See above. */
> > +   WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> > +                atomic_read(&rdtp->dynticks) & 0x1);
> > +}
> > +
> > +/*
> > + * Record exit from an extended quiescent state.  This is only to be
> > + * called from an extended quiescent state.
> > + */
> > +static void rcu_dynticks_eqs_exit(void)
> > +{
> > +   struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
> > +
> > +   /*
> > +    * CPUs seeing atomic_inc() must see prior idle sojourns,
> > +    * and we also must force ordering with the next RCU read-side
> > +    * critical section.
> > +    */
> > +   smp_mb__before_atomic(); /* See above. */
> > +   atomic_inc(&rdtp->dynticks);
> > +   smp_mb__after_atomic(); /* See above. */
> > +   WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> > +                !(atomic_read(&rdtp->dynticks) & 0x1));
> > +}
> > +
> > +/*
> > + * Reset the current CPU's ->dynticks counter to indicate that the
> > + * newly onlined CPU is no longer in an extended quiescent state.
> > + * This will either leave the counter unchanged, or increment it
> > + * to the next non-quiescent value.
> > + *
> > + * The non-atomic test/increment sequence works because the upper bits
> > + * of the ->dynticks counter are manipulated only by the corresponding CPU,
> > + * or when the corresponding CPU is offline.
> > + */
> > +static void rcu_dynticks_eqs_online(void)
> > +{
> > +   struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
> > +
> > +   if (atomic_read(&rdtp->dynticks) & 0x1)
> > +           return;
> > +   atomic_add(0x1, &rdtp->dynticks);
> > +}
> > +
> > +/*
> >   * Snapshot the ->dynticks counter with full ordering so as to allow
> >   * stable comparison of this counter with past and future snapshots.
> >   */
> > @@ -693,7 +752,7 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
> >  {
> >     struct rcu_state *rsp;
> >     struct rcu_data *rdp;
> > -   struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
> > +   struct rcu_dynticks __maybe_unused *rdtp = this_cpu_ptr(&rcu_dynticks);
> 
> Rather than marking a local variable as __maybe_unused (such that the
> compiler can no longer help detect it as unused), could you move it into
> the portion of the function that uses it, so that if reached, it'll
> always get used?
> 
> >     trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);

Its only use is in the above event trace, which can be disabled via
CONFIG_RCU_TRACE=n.  I could put the definition of rdtp under #ifdef,
but this seems ugly.  I could eliminate the variable, substituting
the initialization for rdtp in the event trace, but that would make
for a very long line, or an odd line break.
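
That is, the two alternatives would look something like this (sketches
only, not compile-tested):

#ifdef CONFIG_RCU_TRACE
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
#endif

or, substituting the initialization directly into the event trace:

	trace_rcu_dyntick(TPS("Start"), oldval,
			  this_cpu_ptr(&rcu_dynticks)->dynticks_nesting);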

Or am I missing something here?

> >     if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> > @@ -712,12 +771,7 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
> >             do_nocb_deferred_wakeup(rdp);
> >     }
> >     rcu_prepare_for_idle();
> > -   /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
> > -   smp_mb__before_atomic();  /* See above. */
> > -   atomic_inc(&rdtp->dynticks);
> > -   smp_mb__after_atomic();  /* Force ordering with next sojourn. */
> > -   WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> > -                atomic_read(&rdtp->dynticks) & 0x1);
> > +   rcu_dynticks_eqs_enter();
> >     rcu_dynticks_task_enter();
> >  
> >     /*
> > @@ -846,15 +900,10 @@ void rcu_irq_exit_irqson(void)
> >   */
> >  static void rcu_eqs_exit_common(long long oldval, int user)
> >  {
> > -   struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
> > +   struct rcu_dynticks __maybe_unused *rdtp = this_cpu_ptr(&rcu_dynticks);
> 
> Same comment as above.
> 
> >     rcu_dynticks_task_exit();
> > -   smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
> > -   atomic_inc(&rdtp->dynticks);
> > -   /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
> > -   smp_mb__after_atomic();  /* See above. */
> > -   WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> > -                !(atomic_read(&rdtp->dynticks) & 0x1));
> > +   rcu_dynticks_eqs_exit();
> >     rcu_cleanup_after_idle();
> >     trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);

This one is used here, at the top level of the function, and again a
few lines down.  I see the same options available; eliminating the
variable, for instance, would mean something like this at each use
(sketch only, not compile-tested):
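
	trace_rcu_dyntick(TPS("End"), oldval,
			  this_cpu_ptr(&rcu_dynticks)->dynticks_nesting);

Thoughts?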

                                                        Thanx, Paul

> >     if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> > @@ -1001,11 +1050,7 @@ void rcu_nmi_enter(void)
> >      * period (observation due to Andy Lutomirski).
> >      */
> >     if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
> > -           smp_mb__before_atomic();  /* Force delay from prior write. */
> > -           atomic_inc(&rdtp->dynticks);
> > -           /* atomic_inc() before later RCU read-side crit sects */
> > -           smp_mb__after_atomic();  /* See above. */
> > -           WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
> > +           rcu_dynticks_eqs_exit();
> >             incby = 1;
> >     }
> >     rdtp->dynticks_nmi_nesting += incby;
> > @@ -1043,11 +1088,7 @@ void rcu_nmi_exit(void)
> >  
> >     /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
> >     rdtp->dynticks_nmi_nesting = 0;
> > -   /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
> > -   smp_mb__before_atomic();  /* See above. */
> > -   atomic_inc(&rdtp->dynticks);
> > -   smp_mb__after_atomic();  /* Force delay to next write. */
> > -   WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
> > +   rcu_dynticks_eqs_enter();
> >  }
> >  
> >  /**
> > @@ -3800,8 +3841,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
> >             init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
> >     rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
> >     rcu_sysidle_init_percpu_data(rdp->dynticks);
> > -   atomic_set(&rdp->dynticks->dynticks,
> > -              (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
> > +   rcu_dynticks_eqs_online();
> >     raw_spin_unlock_rcu_node(rnp);          /* irqs remain disabled. */
> >  
> >     /*
> > -- 
> > 2.5.2
> > 
> 
