Re: [Xen-devel] [PATCH v3 01/41] lcoking/barriers, arch: Use smp barriers in smp_store_release()
On Tue, Jan 12, 2016 at 08:28:44AM -0800, Paul E. McKenney wrote:
> On Sun, Jan 10, 2016 at 04:16:32PM +0200, Michael S. Tsirkin wrote:
> > From: Davidlohr Bueso
> > 
> > With commit b92b8b35a2e ("locking/arch: Rename set_mb() to smp_store_mb()")
> > it was made clear that the context of this call (and thus set_mb)
> > is strictly for CPU ordering, as opposed to IO. As such all archs
> > should use the smp variant of mb(), respecting the semantics and
> > saving a mandatory barrier on UP.
> > 
> > Signed-off-by: Davidlohr Bueso
> > Signed-off-by: Peter Zijlstra (Intel)
> > Cc: Andrew Morton
> > Cc: Benjamin Herrenschmidt
> > Cc: Heiko Carstens
> > Cc: Linus Torvalds
> > Cc: Paul E. McKenney
> > Cc: Peter Zijlstra
> > Cc: Thomas Gleixner
> > Cc: Tony Luck
> > Cc: d...@stgolabs.net
> > Link: http://lkml.kernel.org/r/1445975631-17047-3-git-send-email-d...@stgolabs.net
> > Signed-off-by: Ingo Molnar
> 
> Aside from a need for s/lcoking/locking/ in the subject line:
> 
> Reviewed-by: Paul E. McKenney

Thanks! Ingo has already put this into the tip tree as-is, though, and I
need a copy in my tree to avoid breaking bisect, so I will probably keep
it exactly the same to avoid confusion.

> > ---
> >  arch/ia64/include/asm/barrier.h    | 2 +-
> >  arch/powerpc/include/asm/barrier.h | 2 +-
> >  arch/s390/include/asm/barrier.h    | 2 +-
> >  include/asm-generic/barrier.h      | 2 +-
> >  4 files changed, 4 insertions(+), 4 deletions(-)
> > 
> > diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
> > index df896a1..209c4b8 100644
> > --- a/arch/ia64/include/asm/barrier.h
> > +++ b/arch/ia64/include/asm/barrier.h
> > @@ -77,7 +77,7 @@ do {						\
> >  	___p1;						\
> >  })
> > 
> > -#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
> > +#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
> > 
> >  /*
> >   * The group barrier in front of the rsm & ssm are necessary to ensure
> > diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
> > index 0eca6ef..a7af5fb 100644
> > --- a/arch/powerpc/include/asm/barrier.h
> > +++ b/arch/powerpc/include/asm/barrier.h
> > @@ -34,7 +34,7 @@
> >  #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
> >  #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
> > 
> > -#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
> > +#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
> > 
> >  #ifdef __SUBARCH_HAS_LWSYNC
> >  #define SMPWMB LWSYNC
> > diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
> > index d68e11e..7ffd0b1 100644
> > --- a/arch/s390/include/asm/barrier.h
> > +++ b/arch/s390/include/asm/barrier.h
> > @@ -36,7 +36,7 @@
> >  #define smp_mb__before_atomic()	smp_mb()
> >  #define smp_mb__after_atomic()	smp_mb()
> > 
> > -#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
> > +#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
> > 
> >  #define smp_store_release(p, v)			\
> >  do {						\
> > diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
> > index b42afad..0f45f93 100644
> > --- a/include/asm-generic/barrier.h
> > +++ b/include/asm-generic/barrier.h
> > @@ -93,7 +93,7 @@
> >  #endif	/* CONFIG_SMP */
> > 
> >  #ifndef smp_store_mb
> > -#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
> > +#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
> >  #endif
> > 
> >  #ifndef smp_mb__before_atomic
> > -- 
> > MST
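For background: the "mandatory barrier on UP" that the patch description says
is saved comes from how the generic headers map the smp_*() barriers. On
non-SMP builds they collapse to a compiler-only barrier. A minimal sketch,
assuming the simplified semantics of include/asm-generic/barrier.h and
linux/compiler.h around v4.4 (the real headers carry many more cases and
per-arch overrides):

/* Compiler-only barrier: emits no instruction, it only stops the
 * compiler from caching or reordering memory accesses across it. */
#define barrier()	__asm__ __volatile__("" : : : "memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()		/* real hardware memory fence */
#else
#define smp_mb()	barrier()	/* one CPU: compiler fence suffices */
#endif

/* Before the patch, smp_store_mb() used mb() and so always paid for a
 * hardware fence; with smp_mb() the fence compiles away on UP builds: */
#define smp_store_mb(var, value) \
	do { WRITE_ONCE(var, value); smp_mb(); } while (0)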
Re: [Xen-devel] [PATCH v3 01/41] lcoking/barriers, arch: Use smp barriers in smp_store_release()
On Sun, Jan 10, 2016 at 04:16:32PM +0200, Michael S. Tsirkin wrote:
> From: Davidlohr Bueso
> 
> With commit b92b8b35a2e ("locking/arch: Rename set_mb() to smp_store_mb()")
> it was made clear that the context of this call (and thus set_mb)
> is strictly for CPU ordering, as opposed to IO. As such all archs
> should use the smp variant of mb(), respecting the semantics and
> saving a mandatory barrier on UP.
> 
> Signed-off-by: Davidlohr Bueso
> Signed-off-by: Peter Zijlstra (Intel)
> Cc: Andrew Morton
> Cc: Benjamin Herrenschmidt
> Cc: Heiko Carstens
> Cc: Linus Torvalds
> Cc: Paul E. McKenney
> Cc: Peter Zijlstra
> Cc: Thomas Gleixner
> Cc: Tony Luck
> Cc: d...@stgolabs.net
> Link: http://lkml.kernel.org/r/1445975631-17047-3-git-send-email-d...@stgolabs.net
> Signed-off-by: Ingo Molnar

Aside from a need for s/lcoking/locking/ in the subject line:

Reviewed-by: Paul E. McKenney

> ---
>  arch/ia64/include/asm/barrier.h    | 2 +-
>  arch/powerpc/include/asm/barrier.h | 2 +-
>  arch/s390/include/asm/barrier.h    | 2 +-
>  include/asm-generic/barrier.h      | 2 +-
>  4 files changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
> index df896a1..209c4b8 100644
> --- a/arch/ia64/include/asm/barrier.h
> +++ b/arch/ia64/include/asm/barrier.h
> @@ -77,7 +77,7 @@ do {						\
>  	___p1;						\
>  })
> 
> -#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
> +#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
> 
>  /*
>   * The group barrier in front of the rsm & ssm are necessary to ensure
> diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
> index 0eca6ef..a7af5fb 100644
> --- a/arch/powerpc/include/asm/barrier.h
> +++ b/arch/powerpc/include/asm/barrier.h
> @@ -34,7 +34,7 @@
>  #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
>  #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
> 
> -#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
> +#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
> 
>  #ifdef __SUBARCH_HAS_LWSYNC
>  #define SMPWMB LWSYNC
> diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
> index d68e11e..7ffd0b1 100644
> --- a/arch/s390/include/asm/barrier.h
> +++ b/arch/s390/include/asm/barrier.h
> @@ -36,7 +36,7 @@
>  #define smp_mb__before_atomic()	smp_mb()
>  #define smp_mb__after_atomic()	smp_mb()
> 
> -#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
> +#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
> 
>  #define smp_store_release(p, v)			\
>  do {						\
> diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
> index b42afad..0f45f93 100644
> --- a/include/asm-generic/barrier.h
> +++ b/include/asm-generic/barrier.h
> @@ -93,7 +93,7 @@
>  #endif	/* CONFIG_SMP */
> 
>  #ifndef smp_store_mb
> -#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
> +#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
>  #endif
> 
>  #ifndef smp_mb__before_atomic
> -- 
> MST
[Xen-devel] [PATCH v3 01/41] lcoking/barriers, arch: Use smp barriers in smp_store_release()
From: Davidlohr Bueso

With commit b92b8b35a2e ("locking/arch: Rename set_mb() to smp_store_mb()")
it was made clear that the context of this call (and thus set_mb)
is strictly for CPU ordering, as opposed to IO. As such all archs
should use the smp variant of mb(), respecting the semantics and
saving a mandatory barrier on UP.

Signed-off-by: Davidlohr Bueso
Signed-off-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Benjamin Herrenschmidt
Cc: Heiko Carstens
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Tony Luck
Cc: d...@stgolabs.net
Link: http://lkml.kernel.org/r/1445975631-17047-3-git-send-email-d...@stgolabs.net
Signed-off-by: Ingo Molnar
---
 arch/ia64/include/asm/barrier.h    | 2 +-
 arch/powerpc/include/asm/barrier.h | 2 +-
 arch/s390/include/asm/barrier.h    | 2 +-
 include/asm-generic/barrier.h      | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index df896a1..209c4b8 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -77,7 +77,7 @@ do {						\
 	___p1;						\
 })
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 0eca6ef..a7af5fb 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,7 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #ifdef __SUBARCH_HAS_LWSYNC
 #define SMPWMB LWSYNC
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d68e11e..7ffd0b1 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic()	smp_mb()
 #define smp_mb__after_atomic()	smp_mb()
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_store_release(p, v)			\
 do {						\
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index b42afad..0f45f93 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -93,7 +93,7 @@
 #endif	/* CONFIG_SMP */
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
-- 
MST
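For a sense of where smp_store_mb() is actually needed: it implements the
store-then-check idiom, e.g. a task publishing "I am about to sleep" before
testing for pending work. Below is a hypothetical userspace C11 analogue of
that pairing; the names flag, cond, waiter_should_sleep() and
waker_should_wake() are made up for illustration and do not come from the
patch.

/* Hypothetical C11 analogue of the kernel store-then-check idiom that
 * smp_store_mb() serves; an illustrative sketch only, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool flag;	/* waiter: "I am about to sleep" */
static atomic_bool cond;	/* waker:  "work is available"   */

static bool waiter_should_sleep(void)
{
	/* Analogue of smp_store_mb(flag, true): a plain store followed
	 * by a full memory barrier, exactly the shape of the macro. */
	atomic_store_explicit(&flag, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);

	/* Sleep only if no work was published before our fence. */
	return !atomic_load_explicit(&cond, memory_order_relaxed);
}

static bool waker_should_wake(void)
{
	atomic_store_explicit(&cond, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);

	/* Wake if a waiter has announced itself. */
	return atomic_load_explicit(&flag, memory_order_relaxed);
}

The two seq_cst fences pair up: the outcome where both sides read the old
value is forbidden, so a waiter can never be left asleep while work is
pending, which is the guarantee the full barrier in smp_store_mb() provides
in the kernel.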