When not running as a Xen PV guest, patch in the optimal MSR instructions via alternatives, and use direct calls otherwise.
This is especially beneficial for performance when a kernel built with paravirtualization support is not running as a Xen PV guest, as MSR access functions no longer incur any call overhead.

Signed-off-by: Juergen Gross <[email protected]>
---
V3:
- new patch
---
 arch/x86/include/asm/paravirt-msr.h   | 101 ++++++++++++++++++++++----
 arch/x86/include/asm/paravirt_types.h |   1 +
 2 files changed, 86 insertions(+), 16 deletions(-)

diff --git a/arch/x86/include/asm/paravirt-msr.h b/arch/x86/include/asm/paravirt-msr.h
index 4ce690b05600..122a7525ae17 100644
--- a/arch/x86/include/asm/paravirt-msr.h
+++ b/arch/x86/include/asm/paravirt-msr.h
@@ -27,33 +27,103 @@ extern struct pv_msr_ops pv_ops_msr;
 #define PV_CALLEE_SAVE_REGS_MSR_THUNK(func)				\
 	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text", MSR)
 
+#define ASM_CLRERR	"xor %[err],%[err]\n"
+
+#define PV_RDMSR_VAR(__msr, __val, __type, __func, __err)		\
+	asm volatile(							\
+		"1:\n"							\
+		ALTERNATIVE_2(PARAVIRT_CALL,				\
+			      RDMSR_AND_SAVE_RESULT ASM_CLRERR, X86_FEATURE_ALWAYS, \
+			      ALT_CALL_INSTR, ALT_XEN_CALL)		\
+		"2:\n"							\
+		_ASM_EXTABLE_TYPE_REG(1b, 2b, __type, %[err])		\
+		: [err] "=d" (__err), [val] "=a" (__val),		\
+		  ASM_CALL_CONSTRAINT					\
+		: paravirt_ptr(pv_ops_msr, __func), "c" (__msr)		\
+		: "cc")
+
+#define PV_RDMSR_CONST(__msr, __val, __type, __func, __err)		\
+	asm volatile(							\
+		"1:\n"							\
+		ALTERNATIVE_3(PARAVIRT_CALL,				\
+			      RDMSR_AND_SAVE_RESULT ASM_CLRERR, X86_FEATURE_ALWAYS, \
+			      ASM_RDMSR_IMM ASM_CLRERR, X86_FEATURE_MSR_IMM, \
+			      ALT_CALL_INSTR, ALT_XEN_CALL)		\
+		"2:\n"							\
+		_ASM_EXTABLE_TYPE_REG(1b, 2b, __type, %[err])		\
+		: [err] "=d" (__err), [val] "=a" (__val),		\
+		  ASM_CALL_CONSTRAINT					\
+		: paravirt_ptr(pv_ops_msr, __func),			\
+		  "c" (__msr), [msr] "i" (__msr)			\
+		: "cc")
+
+#define PV_WRMSR_VAR(__msr, __val, __type, __func, __err)		\
+({									\
+	unsigned long rdx = rdx;					\
+	asm volatile(							\
+		"1:\n"							\
+		ALTERNATIVE_3(PARAVIRT_CALL,				\
+			      "wrmsr;" ASM_CLRERR, X86_FEATURE_ALWAYS,	\
+			      ASM_WRMSRNS ASM_CLRERR, X86_FEATURE_WRMSRNS, \
+			      ALT_CALL_INSTR, ALT_XEN_CALL)		\
+		"2:\n"							\
+		_ASM_EXTABLE_TYPE_REG(1b, 2b, __type, %[err])		\
+		: [err] "=a" (__err), "=d" (rdx), ASM_CALL_CONSTRAINT	\
+		: paravirt_ptr(pv_ops_msr, __func),			\
+		  "0" (__val), "1" ((__val) >> 32), "c" (__msr)		\
+		: "memory", "cc");					\
+})
+
+#define PV_WRMSR_CONST(__msr, __val, __type, __func, __err)		\
+({									\
+	unsigned long rdx = rdx;					\
+	asm volatile(							\
+		"1:\n"							\
+		ALTERNATIVE_4(PARAVIRT_CALL,				\
+			      "wrmsr;" ASM_CLRERR, X86_FEATURE_ALWAYS,	\
+			      ASM_WRMSRNS ASM_CLRERR, X86_FEATURE_WRMSRNS, \
+			      ASM_WRMSRNS_IMM ASM_CLRERR, X86_FEATURE_MSR_IMM, \
+			      ALT_CALL_INSTR, ALT_XEN_CALL)		\
+		"2:\n"							\
+		_ASM_EXTABLE_TYPE_REG(1b, 2b, __type, %[err])		\
+		: [err] "=a" (__err), "=d" (rdx), ASM_CALL_CONSTRAINT	\
+		: paravirt_ptr(pv_ops_msr, __func),			\
+		  [val] "0" (__val), "1" ((__val) >> 32),		\
+		  "c" (__msr), [msr] "i" (__msr)			\
+		: "memory", "cc");					\
+})
+
 static __always_inline u64 read_msr(u32 msr)
 {
 	u64 val;
+	int err;
 
-	asm volatile(PARAVIRT_CALL
-		     : "=a" (val), ASM_CALL_CONSTRAINT
-		     : paravirt_ptr(pv_ops_msr, read_msr), "c" (msr)
-		     : "rdx");
+	if (__builtin_constant_p(msr))
+		PV_RDMSR_CONST(msr, val, EX_TYPE_RDMSR, read_msr, err);
+	else
+		PV_RDMSR_VAR(msr, val, EX_TYPE_RDMSR, read_msr, err);
 
 	return val;
 }
 
 static __always_inline void write_msr(u32 msr, u64 val)
 {
-	asm volatile(PARAVIRT_CALL
-		     : ASM_CALL_CONSTRAINT
-		     : paravirt_ptr(pv_ops_msr, write_msr), "c" (msr), "a" (val)
-		     : "memory", "rdx");
+	int err;
+
+	if (__builtin_constant_p(msr))
+		PV_WRMSR_CONST(msr, val, EX_TYPE_WRMSR, write_msr, err);
+	else
+		PV_WRMSR_VAR(msr, val, EX_TYPE_WRMSR, write_msr, err);
 }
 
 static __always_inline int read_msr_safe(u32 msr, u64 *val)
 {
 	int err;
 
-	asm volatile(PARAVIRT_CALL
-		     : [err] "=d" (err), "=a" (*val), ASM_CALL_CONSTRAINT
-		     : paravirt_ptr(pv_ops_msr, read_msr_safe), "c" (msr));
+	if (__builtin_constant_p(msr))
+		PV_RDMSR_CONST(msr, *val, EX_TYPE_RDMSR_SAFE, read_msr_safe, err);
+	else
+		PV_RDMSR_VAR(msr, *val, EX_TYPE_RDMSR_SAFE, read_msr_safe, err);
 
 	return err ? -EIO : 0;
 }
@@ -62,11 +132,10 @@ static __always_inline int write_msr_safe(u32 msr, u64 val)
 {
 	int err;
 
-	asm volatile(PARAVIRT_CALL
-		     : [err] "=a" (err), ASM_CALL_CONSTRAINT
-		     : paravirt_ptr(pv_ops_msr, write_msr_safe),
-		       "c" (msr), "a" (val)
-		     : "memory", "rdx");
+	if (__builtin_constant_p(msr))
+		PV_WRMSR_CONST(msr, val, EX_TYPE_WRMSR_SAFE, write_msr_safe, err);
+	else
+		PV_WRMSR_VAR(msr, val, EX_TYPE_WRMSR_SAFE, write_msr_safe, err);
 
 	return err ? -EIO : 0;
 }
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 999a5abe54ed..bdaecc54c6ee 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -451,6 +451,7 @@ extern struct paravirt_patch_template pv_ops;
 #endif /* __ASSEMBLER__ */
 
 #define ALT_NOT_XEN	ALT_NOT(X86_FEATURE_XENPV)
+#define ALT_XEN_CALL	ALT_DIRECT_CALL(X86_FEATURE_XENPV)
 
 #ifdef CONFIG_X86_32
 /* save and restore all caller-save registers, except return value */
-- 
2.53.0
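For readers less familiar with the alternatives mechanism referenced above, here is a minimal, standalone userspace C sketch of the idea: the hot path stops going through a call into an ops table and instead runs the native operation in place, while the Xen PV case keeps calling a function. All names in the sketch (struct msr_ops, xen_read_msr, native_read_msr, patched_native) are hypothetical stand-ins and not part of the kernel code in the patch.

/*
 * Hypothetical illustration only, not kernel code. The kernel performs the
 * selection once, at alternatives-patching time, by rewriting the call site;
 * this sketch models that choice with a boolean checked at run time.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical analogue of an MSR ops table such as pv_ops_msr. */
struct msr_ops {
	uint64_t (*read_msr)(uint32_t msr);
};

/* Stand-in for the Xen PV implementation reached via a direct call. */
static uint64_t xen_read_msr(uint32_t msr)
{
	return 0x1000ULL + msr;
}

/* Stand-in for the RDMSR instruction that alternatives patch in. */
static uint64_t native_read_msr(uint32_t msr)
{
	return 0x2000ULL + msr;
}

static struct msr_ops ops = { .read_msr = xen_read_msr };
static bool patched_native;	/* "not running as a Xen PV guest" */

static uint64_t read_msr(uint32_t msr)
{
	if (patched_native)
		return native_read_msr(msr);	/* patched-in fast path */
	return ops.read_msr(msr);		/* Xen PV call path */
}

int main(void)
{
	patched_native = true;	/* pretend X86_FEATURE_XENPV is not set */
	printf("MSR 0x10 = 0x%llx\n", (unsigned long long)read_msr(0x10));
	return 0;
}

The real mechanism is cheaper still: the alternatives framework rewrites the instruction bytes at the call site during boot, so the chosen variant runs unconditionally afterwards with no run-time check at all.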

