On Fri, Nov 20, 2020 at 05:06:50PM +0530, Neeraj Upadhyay wrote:
> 
> 
> On 11/17/2020 6:10 AM, [email protected] wrote:
> > From: "Paul E. McKenney" <[email protected]>
> > 
> > There is a need for a polling interface for SRCU grace periods.
> > This polling needs to initiate an SRCU grace period without having
> > to queue (and manage) a callback.  This commit therefore splits the
> > Tree SRCU __call_srcu() function into callback-initialization and
> > queuing/start-grace-period portions, with the latter in a new function
> > named srcu_gp_start_if_needed().  This function may be passed a NULL
> > callback pointer, in which case it will refrain from queuing anything.
> > 
> > Why have the new function mess with queuing?  Locking considerations,
> > of course!  The callback must be enqueued under the same per-CPU
> > srcu_data lock that is used to snapshot and update the grace-period
> > sequence numbers, so the queuing cannot reasonably be split out from
> > starting the needed grace period.
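> > 
> > For example, once the polling interfaces added later in this series
> > are available, a caller can avoid callbacks entirely.  A hypothetical
> > usage sketch (my_srcu is a placeholder srcu_struct):
> > 
> > 	unsigned long cookie;
> > 
> > 	/* Start a grace period if one is needed, queuing no callback. */
> > 	cookie = start_poll_synchronize_srcu(&my_srcu);
> > 
> > 	/* ... do other work ... */
> > 
> > 	if (poll_state_synchronize_srcu(&my_srcu, cookie))
> > 		pr_info("Full SRCU grace period has elapsed.\n");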
> > 
> > Link: https://lore.kernel.org/rcu/[email protected]/
> > Reported-by: Kent Overstreet <[email protected]>
> > Signed-off-by: Paul E. McKenney <[email protected]>
> > ---
> 
> Reviewed-by: Neeraj Upadhyay <[email protected]>

I applied both Reviewed-bys, thank you!

                                                        Thanx, Paul

> Thanks
> Neeraj
> 
> >   kernel/rcu/srcutree.c | 67 ++++++++++++++++++++++++++++++----------------------
> >   1 file changed, 38 insertions(+), 29 deletions(-)
> > 
> > diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
> > index 79b7081..d930ece 100644
> > --- a/kernel/rcu/srcutree.c
> > +++ b/kernel/rcu/srcutree.c
> > @@ -808,6 +808,43 @@ static void srcu_leak_callback(struct rcu_head *rhp)
> >   }
> >   /*
> > + * Start an SRCU grace period, and also queue the callback if non-NULL.
> > + */
> > +static void srcu_gp_start_if_needed(struct srcu_struct *ssp, struct rcu_head *rhp, bool do_norm)
> > +{
> > +   unsigned long flags;
> > +   int idx;
> > +   bool needexp = false;
> > +   bool needgp = false;
> > +   unsigned long s;
> > +   struct srcu_data *sdp;
> > +
> > +   idx = srcu_read_lock(ssp);
> > +   sdp = raw_cpu_ptr(ssp->sda);
> > +   spin_lock_irqsave_rcu_node(sdp, flags);
> > +   if (rhp)
> > +           rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
> > +   rcu_segcblist_advance(&sdp->srcu_cblist,
> > +                         rcu_seq_current(&ssp->srcu_gp_seq));
> > +   s = rcu_seq_snap(&ssp->srcu_gp_seq);
> > +   (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
> > +   if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
> > +           sdp->srcu_gp_seq_needed = s;
> > +           needgp = true;
> > +   }
> > +   if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
> > +           sdp->srcu_gp_seq_needed_exp = s;
> > +           needexp = true;
> > +   }
> > +   spin_unlock_irqrestore_rcu_node(sdp, flags);
> > +   if (needgp)
> > +           srcu_funnel_gp_start(ssp, sdp, s, do_norm);
> > +   else if (needexp)
> > +           srcu_funnel_exp_start(ssp, sdp->mynode, s);
> > +   srcu_read_unlock(ssp, idx);
> > +}
> > +
> > +/*
> >    * Enqueue an SRCU callback on the srcu_data structure associated with
> >    * the current CPU and the specified srcu_struct structure, initiating
> >    * grace-period processing if it is not already running.
> > @@ -838,13 +875,6 @@ static void srcu_leak_callback(struct rcu_head *rhp)
> >   static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
> >                     rcu_callback_t func, bool do_norm)
> >   {
> > -   unsigned long flags;
> > -   int idx;
> > -   bool needexp = false;
> > -   bool needgp = false;
> > -   unsigned long s;
> > -   struct srcu_data *sdp;
> > -
> >     check_init_srcu_struct(ssp);
> >     if (debug_rcu_head_queue(rhp)) {
> >             /* Probable double call_srcu(), so leak the callback. */
> > @@ -853,28 +883,7 @@ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
> >             return;
> >     }
> >     rhp->func = func;
> > -   idx = srcu_read_lock(ssp);
> > -   sdp = raw_cpu_ptr(ssp->sda);
> > -   spin_lock_irqsave_rcu_node(sdp, flags);
> > -   rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
> > -   rcu_segcblist_advance(&sdp->srcu_cblist,
> > -                         rcu_seq_current(&ssp->srcu_gp_seq));
> > -   s = rcu_seq_snap(&ssp->srcu_gp_seq);
> > -   (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
> > -   if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
> > -           sdp->srcu_gp_seq_needed = s;
> > -           needgp = true;
> > -   }
> > -   if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
> > -           sdp->srcu_gp_seq_needed_exp = s;
> > -           needexp = true;
> > -   }
> > -   spin_unlock_irqrestore_rcu_node(sdp, flags);
> > -   if (needgp)
> > -           srcu_funnel_gp_start(ssp, sdp, s, do_norm);
> > -   else if (needexp)
> > -           srcu_funnel_exp_start(ssp, sdp->mynode, s);
> > -   srcu_read_unlock(ssp, idx);
> > +   srcu_gp_start_if_needed(ssp, rhp, do_norm);
> >   }
> >   /**
> > 
> 
> -- 
> QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a member of
> the Code Aurora Forum, hosted by The Linux Foundation
