SWAPGS and SWAPGS_UNSAFE_STACK are assembly macros. Add C versions
of these macros (swapgs() and swapgs_unsafe_stack()).

Signed-off-by: Alexandre Chartre <alexandre.char...@oracle.com>
---
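Note (patch annotation, not part of the commit message): a minimal usage
sketch of how the two helpers are meant to differ; the example_* callers
below are hypothetical and only illustrate the constraint described in
the swapgs_unsafe_stack() comment.

	/* Hypothetical callers, for illustration only. */
	static void example_switch_to_kernel_gs(void)
	{
		swapgs();		/* regular pvop call is fine here */
	}

	static void example_entry_with_user_stack(void)
	{
		/*
		 * No pvop call possible while the userspace stack is
		 * still current: the replacement must be inlined (or
		 * the raw SWAPGS instruction trapped and emulated).
		 */
		swapgs_unsafe_stack();
	}
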
 arch/x86/include/asm/paravirt.h       | 15 +++++++++++++++
 arch/x86/include/asm/paravirt_types.h | 17 ++++++++++++-----
 2 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index d25cc6830e89..a4898130b36b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -145,6 +145,21 @@ static inline void __write_cr4(unsigned long x)
        PVOP_VCALL1(cpu.write_cr4, x);
 }
 
+static inline void swapgs(void)
+{
+       PVOP_VCALL0(cpu.swapgs);
+}
+
+/*
+ * If swapgs is used while the userspace stack is still current,
+ * there's no way to call a pvop.  The PV replacement *must* be
+ * inlined, or the swapgs instruction must be trapped and emulated.
+ */
+static inline void swapgs_unsafe_stack(void)
+{
+       PVOP_VCALL0_ALT(cpu.swapgs, "swapgs");
+}
+
 static inline void arch_safe_halt(void)
 {
        PVOP_VCALL0(irq.safe_halt);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0fad9f61c76a..eea9acc942a3 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -532,12 +532,12 @@ int paravirt_disable_iospace(void);
                      pre, post, ##__VA_ARGS__)
 
 
-#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)       \
+#define ____PVOP_VCALL(op, insn, clbr, call_clbr, extra_clbr, pre, post, ...) \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
-                            paravirt_alt(PARAVIRT_CALL)                \
+                            paravirt_alt(insn)                         \
                             post                                       \
                             : call_clbr, ASM_CALL_CONSTRAINT           \
                             : paravirt_type(op),                       \
@@ -547,12 +547,17 @@ int paravirt_disable_iospace(void);
        })
 
 #define __PVOP_VCALL(op, pre, post, ...)                               \
-       ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,               \
-                      VEXTRA_CLOBBERS,                                 \
+       ____PVOP_VCALL(op, PARAVIRT_CALL, CLBR_ANY,                     \
+                      PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,            \
                       pre, post, ##__VA_ARGS__)
 
+#define __PVOP_VCALL_ALT(op, insn)                                     \
+       ____PVOP_VCALL(op, insn, CLBR_ANY,                              \
+                      PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,            \
+                      "", "")
+
 #define __PVOP_VCALLEESAVE(op, pre, post, ...)                         \
-       ____PVOP_VCALL(op.func, CLBR_RET_REG,                           \
+       ____PVOP_VCALL(op.func, PARAVIRT_CALL, CLBR_RET_REG,            \
                      PVOP_VCALLEE_CLOBBERS, ,                          \
                      pre, post, ##__VA_ARGS__)
 
@@ -562,6 +567,8 @@ int paravirt_disable_iospace(void);
        __PVOP_CALL(rettype, op, "", "")
 #define PVOP_VCALL0(op)                                                       \
        __PVOP_VCALL(op, "", "")
+#define PVOP_VCALL0_ALT(op, insn)                                      \
+       __PVOP_VCALL_ALT(op, insn)
 
 #define PVOP_CALLEE0(rettype, op)                                      \
        __PVOP_CALLEESAVE(rettype, op, "", "")
-- 
2.18.4
