On Wed, Mar 06, 2019 at 06:14:51PM +0100, Peter Zijlstra wrote:
> On Wed, Mar 06, 2019 at 02:13:47PM +0100, Peter Zijlstra wrote:
> > +static __always_inline unsigned long smap_save(void)
> > +{
> > +   unsigned long flags = arch_local_save_flags();
> > +   clac();
> > +   return flags;
> > +}
> > +
> > +static __always_inline void smap_restore(unsigned long flags)
> > +{
> > +   arch_local_irq_restore(flags);
> > +}
> 
> ARGH; the bloody paravirt-me-harder nonsense turns those into pvops calls.
> 
> And that (obviously) explodes. Anybody got any clue why that Xen
> trainwreck wants to paravirt "PUSHF;POP" and "PUSH;POPF"!?
> 
> Also; I suppose I can ALTERNATIVE the whole thing, because Xen will not
> be having SMAP in the first place.

The below seems to 'work'.

--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -46,8 +46,6 @@
 
 #ifdef CONFIG_X86_SMAP
 
-#include <asm/irqflags.h>
-
 static __always_inline void clac(void)
 {
        /* Note: a barrier is implicit in alternative() */
@@ -62,14 +60,19 @@ static __always_inline void stac(void)
 
 static __always_inline unsigned long smap_save(void)
 {
-       unsigned long flags = arch_local_save_flags();
-       clac();
+       unsigned long flags;
+
+       asm volatile (ALTERNATIVE("", "pushf; pop %0; " __stringify(__ASM_CLAC),
+                                 X86_FEATURE_SMAP)
+                     : "=rm" (flags) : : "memory", "cc");
+
        return flags;
 }
 
 static __always_inline void smap_restore(unsigned long flags)
 {
-       arch_local_irq_restore(flags);
+       asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
+                     : : "g" (flags) : "memory", "cc");
 }
 
 /* These macros can be used in asm() statements */

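To spell out what the ALTERNATIVE buys us: on hardware with
X86_FEATURE_SMAP the helpers get patched to plain PUSHF;POP;CLAC and
PUSH;POPF, no pvops call in sight; without SMAP (e.g. a Xen PV guest)
both sites stay as a few NOPs and the flags value is never produced or
consumed. A usage sketch, with the context around the calls made up
purely for illustration:

	unsigned long flags;

	flags = smap_save();	/* save EFLAGS (incl. AC), then CLAC */
	/* ... code that must not run with AC set ... */
	smap_restore(flags);	/* POPF the saved EFLAGS, putting AC back */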