spin_unlock() followed by spin_lock() does not form a full memory
barrier. Consider the following litmus test (everything initialized
to 0):

CPU1:
	a=1;
	spin_unlock(&b);
	spin_lock(&c);
	+ smp_mb__after_unlock_lock();
	r1=d;

CPU2:
	d=1;
	smp_mb();
	r2=a;

Without the smp_mb__after_unlock_lock(), r1==0 && r2==0 would be
possible.
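For illustration only (not part of this patch, and assuming the patch
is applied so the macro is visible outside RCU), the litmus test
corresponds roughly to the following caller code; the lock and
variable names lock_b, lock_c, a and d are invented for this sketch.
With the barrier in place, r1==0 && r2==0 is forbidden:

#include <linux/spinlock.h>
#include <linux/compiler.h>

static DEFINE_SPINLOCK(lock_b);
static DEFINE_SPINLOCK(lock_c);
static int a, d;

/* CPU1: store to "a", then UNLOCK b + LOCK c, then load "d". */
static int cpu1(void)
{
	int r1;

	spin_lock(&lock_b);
	WRITE_ONCE(a, 1);
	spin_unlock(&lock_b);

	spin_lock(&lock_c);
	smp_mb__after_unlock_lock();	/* upgrade UNLOCK+LOCK to a full barrier */
	r1 = READ_ONCE(d);
	spin_unlock(&lock_c);

	return r1;
}

/* CPU2: store to "d", full barrier, then load "a". */
static int cpu2(void)
{
	int r2;

	WRITE_ONCE(d, 1);
	smp_mb();
	r2 = READ_ONCE(a);

	return r2;
}

As the #ifdef below shows, only PPC needs the extra smp_mb(); on the
other architectures the macro expands to a no-op because UNLOCK
followed by LOCK is already fully ordered there.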
Signed-off-by: Manfred Spraul <manf...@colorfullife.com>
---
 include/asm-generic/barrier.h | 16 ++++++++++++++++
 kernel/rcu/tree.h             | 12 ------------
 2 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index fe297b5..9b4d28f 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -244,6 +244,22 @@ do { \
 	smp_acquire__after_ctrl_dep(); \
 	VAL; \
 })
+
+#ifndef smp_mb__after_unlock_lock
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifdef CONFIG_PPC
+#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
+#else /* #ifdef CONFIG_PPC */
+#define smp_mb__after_unlock_lock()	do { } while (0)
+#endif /* #else #ifdef CONFIG_PPC */
+
+#endif
+
 #endif
 
 #endif /* !__ASSEMBLY__ */
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index e99a523..a0cd9ab 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -687,18 +687,6 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
 /*
- * Place this after a lock-acquisition primitive to guarantee that
- * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
- * if the UNLOCK and LOCK are executed by the same CPU or if the
- * UNLOCK and LOCK operate on the same lock variable.
- */
-#ifdef CONFIG_PPC
-#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
-#else /* #ifdef CONFIG_PPC */
-#define smp_mb__after_unlock_lock()	do { } while (0)
-#endif /* #else #ifdef CONFIG_PPC */
-
-/*
  * Wrappers for the rcu_node::lock acquire and release.
  *
  * Because the rcu_nodes form a tree, the tree traversal locking will observe
--
2.5.5