In order to prepare for inlining the RDMSR/WRMSR instructions directly via alternatives when not running in a Xen PV guest, switch the interfaces of the MSR-related pvops callbacks to ones resembling those of the related instructions.
In order to prepare for supporting the immediate variants of RDMSR/WRMSR, use a 64-bit interface instead of the split 32-bit one of the RDMSR/WRMSR instructions.

Signed-off-by: Juergen Gross <[email protected]>
---
V3:
- former patch 5 of V1 has been split
- use 64-bit interface (Xin Li)
---
 arch/x86/include/asm/paravirt-msr.h | 49 ++++++++++++++++++++++++-----
 arch/x86/kernel/paravirt.c          | 36 ++++++++++++++++++---
 arch/x86/xen/enlighten_pv.c         | 45 +++++++++++++++++++-------
 3 files changed, 107 insertions(+), 23 deletions(-)

diff --git a/arch/x86/include/asm/paravirt-msr.h b/arch/x86/include/asm/paravirt-msr.h
index b299864b438a..4ce690b05600 100644
--- a/arch/x86/include/asm/paravirt-msr.h
+++ b/arch/x86/include/asm/paravirt-msr.h
@@ -6,36 +6,69 @@
 struct pv_msr_ops {
 	/* Unsafe MSR operations. These will warn or panic on failure. */
-	u64 (*read_msr)(u32 msr);
-	void (*write_msr)(u32 msr, u64 val);
+	struct paravirt_callee_save read_msr;
+	struct paravirt_callee_save write_msr;
 
 	/* Safe MSR operations. Returns 0 or -EIO. */
-	int (*read_msr_safe)(u32 msr, u64 *val);
-	int (*write_msr_safe)(u32 msr, u64 val);
+	struct paravirt_callee_save read_msr_safe;
+	struct paravirt_callee_save write_msr_safe;
 
 	u64 (*read_pmc)(int counter);
 } __no_randomize_layout;
 
 extern struct pv_msr_ops pv_ops_msr;
 
+#define PV_PROLOGUE_MSR(func)				\
+	PV_SAVE_COMMON_CALLER_REGS			\
+	PV_PROLOGUE_MSR_##func
+
+#define PV_EPILOGUE_MSR(func)	PV_RESTORE_COMMON_CALLER_REGS
+
+#define PV_CALLEE_SAVE_REGS_MSR_THUNK(func)		\
+	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text", MSR)
+
 static __always_inline u64 read_msr(u32 msr)
 {
-	return PVOP_CALL1(u64, pv_ops_msr, read_msr, msr);
+	u64 val;
+
+	asm volatile(PARAVIRT_CALL
+		     : "=a" (val), ASM_CALL_CONSTRAINT
+		     : paravirt_ptr(pv_ops_msr, read_msr), "c" (msr)
+		     : "rdx");
+
+	return val;
 }
 
 static __always_inline void write_msr(u32 msr, u64 val)
 {
-	PVOP_VCALL2(pv_ops_msr, write_msr, msr, val);
+	asm volatile(PARAVIRT_CALL
+		     : ASM_CALL_CONSTRAINT
+		     : paravirt_ptr(pv_ops_msr, write_msr), "c" (msr), "a" (val)
+		     : "memory", "rdx");
 }
 
 static __always_inline int read_msr_safe(u32 msr, u64 *val)
 {
-	return PVOP_CALL2(int, pv_ops_msr, read_msr_safe, msr, val);
+	int err;
+
+	asm volatile(PARAVIRT_CALL
+		     : [err] "=d" (err), "=a" (*val), ASM_CALL_CONSTRAINT
+		     : paravirt_ptr(pv_ops_msr, read_msr_safe), "c" (msr));
+
+	return err ? -EIO : 0;
 }
 
 static __always_inline int write_msr_safe(u32 msr, u64 val)
 {
-	return PVOP_CALL2(int, pv_ops_msr, write_msr_safe, msr, val);
+	int err;
+
+	asm volatile(PARAVIRT_CALL
+		     : [err] "=a" (err), ASM_CALL_CONSTRAINT
+		     : paravirt_ptr(pv_ops_msr, write_msr_safe),
+		       "c" (msr), "a" (val)
+		     : "memory", "rdx");
+
+	return err ? -EIO : 0;
 }
 
 static __always_inline u64 rdpmc(int counter)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 089a87ac1582..c0d78e4536c9 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -49,12 +49,40 @@ unsigned long pv_native_save_fl(void);
 void pv_native_irq_disable(void);
 void pv_native_irq_enable(void);
 unsigned long pv_native_read_cr2(void);
+void pv_native_rdmsr(void);
+void pv_native_wrmsr(void);
+void pv_native_rdmsr_safe(void);
+void pv_native_wrmsr_safe(void);
 
 DEFINE_ASM_FUNC(_paravirt_ident_64, "mov %rdi, %rax", .text);
 DEFINE_ASM_FUNC(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
 DEFINE_ASM_FUNC(pv_native_irq_disable, "cli", .noinstr.text);
 DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
 DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_rdmsr,
+		"1: rdmsr\n"
+		"shl $32, %rdx; or %rdx, %rax\n"
+		"2:\n"
+		_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR), .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_wrmsr,
+		"mov %rax, %rdx; shr $32, %rdx\n"
+		"1: wrmsr\n"
+		"2:\n"
+		_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR), .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_rdmsr_safe,
+		"1: rdmsr\n"
+		"shl $32, %rdx; or %rdx, %rax\n"
+		"xor %edx, %edx\n"
+		"2:\n"
+		_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %%edx),
+		.noinstr.text);
+DEFINE_ASM_FUNC(pv_native_wrmsr_safe,
+		"mov %rax, %rdx; shr $32, %rdx\n"
+		"1: wrmsr\n"
+		"xor %eax, %eax\n"
+		"2:\n"
+		_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %%eax),
+		.noinstr.text);
 #endif
 
 static noinstr void pv_native_safe_halt(void)
@@ -211,10 +239,10 @@ struct paravirt_patch_template pv_ops = {
 
 #ifdef CONFIG_PARAVIRT_XXL
 struct pv_msr_ops pv_ops_msr = {
-	.read_msr = native_read_msr,
-	.write_msr = native_write_msr,
-	.read_msr_safe = native_read_msr_safe,
-	.write_msr_safe = native_write_msr_safe,
+	.read_msr = __PV_IS_CALLEE_SAVE(pv_native_rdmsr),
+	.write_msr = __PV_IS_CALLEE_SAVE(pv_native_wrmsr),
+	.read_msr_safe = __PV_IS_CALLEE_SAVE(pv_native_rdmsr_safe),
+	.write_msr_safe = __PV_IS_CALLEE_SAVE(pv_native_wrmsr_safe),
 	.read_pmc = native_read_pmc,
 };
 EXPORT_SYMBOL(pv_ops_msr);
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index b94437f26cc0..fed312a17033 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1154,15 +1154,32 @@ static void xen_do_write_msr(u32 msr, u64 val, int *err)
 	}
 }
 
-static int xen_read_msr_safe(u32 msr, u64 *val)
+/*
+ * Prototypes for functions called via PV_CALLEE_SAVE_REGS_THUNK() in order
+ * to avoid warnings with "-Wmissing-prototypes".
+ */
+struct xen_rdmsr_safe_ret {
+	u64 val;
+	int err;
+};
+struct xen_rdmsr_safe_ret xen_read_msr_safe(u32 msr);
+int xen_write_msr_safe(u32 msr, u64 val);
+u64 xen_read_msr(u32 msr);
+void xen_write_msr(u32 msr, u64 val);
+#define PV_PROLOGUE_RDMSR	"mov %ecx, %edi;"
+#define PV_PROLOGUE_WRMSR	"mov %ecx, %edi; mov %rax, %rsi;"
+
+__visible struct xen_rdmsr_safe_ret xen_read_msr_safe(u32 msr)
 {
-	int err = 0;
+	struct xen_rdmsr_safe_ret ret = { 0, 0 };
 
-	*val = xen_do_read_msr(msr, &err);
-	return err;
+	ret.val = xen_do_read_msr(msr, &ret.err);
+	return ret;
 }
+#define PV_PROLOGUE_MSR_xen_read_msr_safe	PV_PROLOGUE_RDMSR
+PV_CALLEE_SAVE_REGS_MSR_THUNK(xen_read_msr_safe);
 
-static int xen_write_msr_safe(u32 msr, u64 val)
+__visible int xen_write_msr_safe(u32 msr, u64 val)
 {
 	int err = 0;
 
@@ -1170,20 +1187,26 @@ static int xen_write_msr_safe(u32 msr, u64 val)
 	return err;
 }
+#define PV_PROLOGUE_MSR_xen_write_msr_safe	PV_PROLOGUE_WRMSR
+PV_CALLEE_SAVE_REGS_MSR_THUNK(xen_write_msr_safe);
 
-static u64 xen_read_msr(u32 msr)
+__visible u64 xen_read_msr(u32 msr)
 {
 	int err = 0;
 
 	return xen_do_read_msr(msr, xen_msr_safe ? &err : NULL);
 }
+#define PV_PROLOGUE_MSR_xen_read_msr	PV_PROLOGUE_RDMSR
+PV_CALLEE_SAVE_REGS_MSR_THUNK(xen_read_msr);
 
-static void xen_write_msr(u32 msr, u64 val)
+__visible void xen_write_msr(u32 msr, u64 val)
 {
 	int err;
 
 	xen_do_write_msr(msr, val, xen_msr_safe ? &err : NULL);
 }
+#define PV_PROLOGUE_MSR_xen_write_msr	PV_PROLOGUE_WRMSR
+PV_CALLEE_SAVE_REGS_MSR_THUNK(xen_write_msr);
 
 /* This is called once we have the cpu_possible_mask */
 void __init xen_setup_vcpu_info_placement(void)
@@ -1386,10 +1409,10 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
 	pv_ops.cpu.start_context_switch = xen_start_context_switch;
 	pv_ops.cpu.end_context_switch = xen_end_context_switch;
 
-	pv_ops_msr.read_msr = xen_read_msr;
-	pv_ops_msr.write_msr = xen_write_msr;
-	pv_ops_msr.read_msr_safe = xen_read_msr_safe;
-	pv_ops_msr.write_msr_safe = xen_write_msr_safe;
+	pv_ops_msr.read_msr = PV_CALLEE_SAVE(xen_read_msr);
+	pv_ops_msr.write_msr = PV_CALLEE_SAVE(xen_write_msr);
+	pv_ops_msr.read_msr_safe = PV_CALLEE_SAVE(xen_read_msr_safe);
+	pv_ops_msr.write_msr_safe = PV_CALLEE_SAVE(xen_write_msr_safe);
 	pv_ops_msr.read_pmc = xen_read_pmc;
 
 	xen_init_irq_ops();
-- 
2.53.0
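A note on the caller-visible interface (illustration only, not part of the
patch): the accessors keep their C shape but now traffic in full 64-bit
values, so callers never deal with the EDX:EAX split themselves. A minimal
usage sketch, with a hypothetical caller and MSR_TSC_AUX picked purely as an
example MSR:

	/* Hypothetical caller, for illustration only. */
	u64 aux;

	/* Unsafe variant: warns or panics if the MSR access faults. */
	aux = read_msr(MSR_TSC_AUX);

	/* Safe variant: returns 0 or -EIO instead of faulting. */
	if (read_msr_safe(MSR_TSC_AUX, &aux))
		pr_warn("MSR_TSC_AUX not readable\n");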

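The folding done by the pv_native_*() stubs is the plain EDX:EAX convention
of the hardware instructions mapped onto the new 64-bit interface. As a C
sketch of what "shl $32, %rdx; or %rdx, %rax" and "mov %rax, %rdx; shr $32,
%rdx" compute (the helper names here are made up for illustration):

	/* RDMSR leaves the result in EDX:EAX; fold it into one u64. */
	static inline u64 rdmsr_fold(u32 lo, u32 hi)
	{
		return ((u64)hi << 32) | lo;
	}

	/* WRMSR consumes EDX:EAX; split the u64 value back into halves. */
	static inline void wrmsr_split(u64 val, u32 *lo, u32 *hi)
	{
		*lo = (u32)val;
		*hi = (u32)(val >> 32);
	}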

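On the Xen side, the PV_PROLOGUE_RDMSR/PV_PROLOGUE_WRMSR strings bridge the
instruction-like register convention used at the call sites (MSR index in
%ecx, value in %rax) to the SysV C calling convention (%edi, %rsi) before
entering the C implementations. For the safe read, my reading of the SysV
x86-64 ABI (an observation, not stated in the patch) is that the by-value
struct return is what keeps the epilogue free of extra register shuffling:

	/* Sketch: register mapping of the 16-byte integer-class return. */
	struct xen_rdmsr_safe_ret {
		u64 val;	/* first eightbyte, returned in %rax */
		int err;	/* second eightbyte, returned in %edx */
	};

val and err therefore come back exactly where the read_msr_safe() wrapper
expects them, matching its "=a" and "=d" output constraints.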