Make it all a function which does the WRMSR instead of having a hairy
inline asm.
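
For reference, a minimal sketch of how the two pieces fit together after this
change (both taken from the hunks below; the empty default of the alternative
is replaced with the call at patch time only when X86_FEATURE_IBPB is set,
otherwise only NOP padding remains):

	/* nospec-branch.h: emit the call only on IBPB-capable CPUs */
	static inline void indirect_branch_prediction_barrier(void)
	{
		alternative_input("",
				  "call __ibp_barrier",
				  X86_FEATURE_IBPB,
				  ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
	}

	/* bugs.c: the out-of-line helper doing the actual MSR write */
	void __ibp_barrier(void)
	{
		__wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
	}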

Signed-off-by: Borislav Petkov <[email protected]>
---
 arch/x86/include/asm/cpufeatures.h   |  2 +-
 arch/x86/include/asm/nospec-branch.h | 13 ++++---------
 arch/x86/include/asm/processor.h     |  4 ++++
 arch/x86/kernel/cpu/bugs.c           |  7 +++++++
 4 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 6c6d862d66a1..6c033f6adc24 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -211,7 +211,7 @@
 #define X86_FEATURE_MBA                        ( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW          ( 7*32+19) /* Fill RSB on context switches */
 
-#define X86_FEATURE_IBPB               ( 7*32+21) /* Indirect Branch Prediction Barrier enabled*/
+#define X86_FEATURE_IBPB               ( 7*32+21) /* Indirect Branch Prediction Barrier enabled */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 4f88e1b2599f..71ae2dd65259 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -183,15 +183,10 @@ static inline void vmexit_fill_RSB(void)
 
 static inline void indirect_branch_prediction_barrier(void)
 {
-       asm volatile(ALTERNATIVE("",
-                                "movl %[msr], %%ecx\n\t"
-                                "movl %[val], %%eax\n\t"
-                                "movl $0, %%edx\n\t"
-                                "wrmsr",
-                                X86_FEATURE_IBPB)
-                    : : [msr] "i" (MSR_IA32_PRED_CMD),
-                        [val] "i" (PRED_CMD_IBPB)
-                    : "eax", "ecx", "edx", "memory");
+       alternative_input("",
+                        "call __ibp_barrier",
+                        X86_FEATURE_IBPB,
+                        ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
 }
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d3a67fba200a..4d372f1cea5a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -971,4 +971,8 @@ bool xen_set_default_idle(void);
 
 void stop_this_cpu(void *dummy);
 void df_debug(struct pt_regs *regs, long error_code);
+
+#ifdef CONFIG_RETPOLINE
+void __ibp_barrier(void);
+#endif
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index be068aea6bda..448410fcffcf 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -304,3 +304,10 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
                       spectre_v2_bad_module ? " - vulnerable module loaded" : "");
 }
 #endif
+
+#ifdef CONFIG_RETPOLINE
+void __ibp_barrier(void)
+{
+       __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
+}
+#endif
-- 
2.13.0


-- 
Regards/Gruss,
    Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.
