On Wed, Oct 21, 2020 at 03:08:12PM -0400, Joel Fernandes (Google) wrote: > This memory barrier is not needed as rcu_segcblist_add_len() already > includes a memory barrier *before* the length of the list is updated.
*before* and *after*, as you have both cases below. Thanks > > Same reasoning for rcu_segcblist_enqueue(). > > Signed-off-by: Joel Fernandes (Google) <j...@joelfernandes.org> > --- > kernel/rcu/rcu_segcblist.c | 1 - > kernel/rcu/tree.c | 1 - > 2 files changed, 2 deletions(-) > > diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c > index 19ff82b805fb..f0fcdf9d0f7f 100644 > --- a/kernel/rcu/rcu_segcblist.c > +++ b/kernel/rcu/rcu_segcblist.c > @@ -268,7 +268,6 @@ void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, > struct rcu_head *rhp) > { > rcu_segcblist_inc_len(rsclp); > - smp_mb(); /* Ensure counts are updated before callback is enqueued. */ > rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL); > rhp->next = NULL; > WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp); > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c > index 346a05506935..6c6d3c7036e6 100644 > --- a/kernel/rcu/tree.c > +++ b/kernel/rcu/tree.c > @@ -2525,7 +2525,6 @@ static void rcu_do_batch(struct rcu_data *rdp) > > /* Update counts and requeue any remaining callbacks. */ > rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); > - smp_mb(); /* List handling before counting for rcu_barrier(). */ > rcu_segcblist_add_len(&rdp->cblist, -count); > > /* Reinstate batch limit if we have worked down the excess. */ > -- > 2.29.0.rc1.297.gfa9743e501-goog >