In order to support carrying an srcu_read_lock() section across fork(), where both the parent and the child process will subsequently do srcu_read_unlock(), the extra decrement must be balanced by an extra increment at fork time.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> --- include/linux/srcu.h | 1 + include/linux/srcutiny.h | 10 ++++++++++ kernel/rcu/srcutree.c | 5 +++++ 3 files changed, 16 insertions(+) --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -55,6 +55,7 @@ void call_srcu(struct srcu_struct *ssp, void (*func)(struct rcu_head *head)); void cleanup_srcu_struct(struct srcu_struct *ssp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); +void __srcu_clone_read_lock(struct srcu_struct *ssp, int idx); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); void synchronize_srcu(struct srcu_struct *ssp); unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp); --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -71,6 +71,16 @@ static inline int __srcu_read_lock(struc return idx; } +static inline void __srcu_clone_read_lock(struct srcu_struct *ssp, int idx) +{ + int newval; + + preempt_disable(); // Needed for PREEMPT_AUTO + newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1; + WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval); + preempt_enable(); +} + static inline void synchronize_srcu_expedited(struct srcu_struct *ssp) { synchronize_srcu(ssp); --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -720,6 +720,11 @@ int __srcu_read_lock(struct srcu_struct } EXPORT_SYMBOL_GPL(__srcu_read_lock); +void __srcu_clone_read_lock(struct srcu_struct *ssp, int idx) +{ + this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter); +} + /* * Removes the count for the old reader from the appropriate per-CPU * element of the srcu_struct. Note that this may well be a different