Hi Juergen,

Sorry for the delay in reviewing this patch!

On 10/20/22 2:19 AM, Juergen Gross wrote:
> There are some paravirt assembler functions which share a common
> pattern. Introduce a macro DEFINE_PARAVIRT_ASM() for creating them.
> 
> The explicit _paravirt_nop() prototype in paravirt.c isn't needed, as
> it is included in paravirt_types.h already.
> 
> Signed-off-by: Juergen Gross <jgr...@suse.com>

I just wanted to make a note that the part of this patch that's not
purely cleanup is the addition of the alignment (__ALIGN_STR) to
__raw_callee_save___kvm_vcpu_is_preempted(), _paravirt_nop() and
paravirt_ret0(). Maybe that's worth calling out in the commit message?
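
For anyone following along, if I'm expanding the new macro correctly,
DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text) comes out to
roughly:

    asm (".pushsection .entry.text, \"ax\"\n"
         ".global _paravirt_nop\n\t"
         ".type _paravirt_nop, @function\n\t"
         __ALIGN_STR "\n"
         "_paravirt_nop:\n\t"
         ASM_ENDBR
         ASM_RET
         ".size _paravirt_nop, . - _paravirt_nop\n\t"
         ".popsection");

so every function generated through the macro now starts at the
alignment given by __ALIGN_STR (".p2align 4, 0x90" on x86, IIRC),
which the open-coded versions of _paravirt_nop and paravirt_ret0
didn't have before.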

Reviewed-by: Srivatsa S. Bhat (VMware) <sriva...@csail.mit.edu>

Regards,
Srivatsa
VMware Photon OS

> ---
>  arch/x86/include/asm/paravirt.h           | 12 ++++++
>  arch/x86/include/asm/qspinlock_paravirt.h | 46 ++++++++++-------------
>  arch/x86/kernel/kvm.c                     | 19 +++-------
>  arch/x86/kernel/paravirt.c                | 22 ++---------
>  4 files changed, 40 insertions(+), 59 deletions(-)
> 
> diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
> index 2a0b8dd4ec33..479bf264b8aa 100644
> --- a/arch/x86/include/asm/paravirt.h
> +++ b/arch/x86/include/asm/paravirt.h
> @@ -730,6 +730,18 @@ static __always_inline unsigned long arch_local_irq_save(void)
>  #undef PVOP_VCALL4
>  #undef PVOP_CALL4
>  
> +#define DEFINE_PARAVIRT_ASM(func, instr, sec)                \
> +     asm (".pushsection " #sec ", \"ax\"\n"          \
> +          ".global " #func "\n\t"                    \
> +          ".type " #func ", @function\n\t"           \
> +          __ALIGN_STR "\n"                           \
> +          #func ":\n\t"                              \
> +          ASM_ENDBR                                  \
> +          instr                                      \
> +          ASM_RET                                    \
> +          ".size " #func ", . - " #func "\n\t"       \
> +          ".popsection")
> +
>  extern void default_banner(void);
>  
>  #else  /* __ASSEMBLY__ */
> diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
> index 60ece592b220..c490f5eb9f3e 100644
> --- a/arch/x86/include/asm/qspinlock_paravirt.h
> +++ b/arch/x86/include/asm/qspinlock_paravirt.h
> @@ -14,8 +14,6 @@
>  
>  __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
>  #define __pv_queued_spin_unlock      __pv_queued_spin_unlock
> -#define PV_UNLOCK            "__raw_callee_save___pv_queued_spin_unlock"
> -#define PV_UNLOCK_SLOWPATH   "__raw_callee_save___pv_queued_spin_unlock_slowpath"
>  
>  /*
>   * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
> @@ -37,32 +35,26 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
>   *   rsi = lockval           (second argument)
>   *   rdx = internal variable (set to 0)
>   */
> -asm    (".pushsection .spinlock.text;"
> -     ".globl " PV_UNLOCK ";"
> -     ".type " PV_UNLOCK ", @function;"
> -     ".align 4,0x90;"
> -     PV_UNLOCK ": "
> -     ASM_ENDBR
> -     FRAME_BEGIN
> -     "push  %rdx;"
> -     "mov   $0x1,%eax;"
> -     "xor   %edx,%edx;"
> -     LOCK_PREFIX "cmpxchg %dl,(%rdi);"
> -     "cmp   $0x1,%al;"
> -     "jne   .slowpath;"
> -     "pop   %rdx;"
> +#define PV_UNLOCK_ASM                                                   \
> +     FRAME_BEGIN                                                     \
> +     "push  %rdx\n\t"                                                \
> +     "mov   $0x1,%eax\n\t"                                           \
> +     "xor   %edx,%edx\n\t"                                           \
> +     LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t"                            \
> +     "cmp   $0x1,%al\n\t"                                            \
> +     "jne   .slowpath\n\t"                                           \
> +     "pop   %rdx\n\t"                                                \
> +     FRAME_END                                                       \
> +     ASM_RET                                                         \
> +     ".slowpath:\n\t"                                                \
> +     "push   %rsi\n\t"                                               \
> +     "movzbl %al,%esi\n\t"                                           \
> +     "call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t"   \
> +     "pop    %rsi\n\t"                                               \
> +     "pop    %rdx\n\t"                                               \
>       FRAME_END
> -     ASM_RET
> -     ".slowpath: "
> -     "push   %rsi;"
> -     "movzbl %al,%esi;"
> -     "call " PV_UNLOCK_SLOWPATH ";"
> -     "pop    %rsi;"
> -     "pop    %rdx;"
> -     FRAME_END
> -     ASM_RET
> -     ".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
> -     ".popsection");
> +DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock, PV_UNLOCK_ASM,
> +                 .spinlock.text);
>  
>  #else /* CONFIG_64BIT */
>  
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index d4e48b4a438b..856708cc78e7 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -798,19 +798,12 @@ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
>   * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
>   * restoring to/from the stack.
>   */
> -asm(
> -".pushsection .text;"
> -".global __raw_callee_save___kvm_vcpu_is_preempted;"
> -".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
> -"__raw_callee_save___kvm_vcpu_is_preempted:"
> -ASM_ENDBR
> -"movq        __per_cpu_offset(,%rdi,8), %rax;"
> -"cmpb        $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
> -"setne       %al;"
> -ASM_RET
> -".size __raw_callee_save___kvm_vcpu_is_preempted, 
> .-__raw_callee_save___kvm_vcpu_is_preempted;"
> -".popsection");
> -
> +#define PV_VCPU_PREEMPTED_ASM                                                \
> + "movq   __per_cpu_offset(,%rdi,8), %rax\n\t"                                \
> + "cmpb   $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \
> + "setne  %al\n\t"
> +DEFINE_PARAVIRT_ASM(__raw_callee_save___kvm_vcpu_is_preempted,
> +                 PV_VCPU_PREEMPTED_ASM, .text);
>  #endif
>  
>  static void __init kvm_guest_init(void)
> diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
> index 7ca2d46c08cc..6f306f885caf 100644
> --- a/arch/x86/kernel/paravirt.c
> +++ b/arch/x86/kernel/paravirt.c
> @@ -37,27 +37,11 @@
>   * nop stub, which must not clobber anything *including the stack* to
>   * avoid confusing the entry prologues.
>   */
> -extern void _paravirt_nop(void);
> -asm (".pushsection .entry.text, \"ax\"\n"
> -     ".global _paravirt_nop\n"
> -     "_paravirt_nop:\n\t"
> -     ASM_ENDBR
> -     ASM_RET
> -     ".size _paravirt_nop, . - _paravirt_nop\n\t"
> -     ".type _paravirt_nop, @function\n\t"
> -     ".popsection");
> +DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text);
>  
>  /* stub always returning 0. */
> -asm (".pushsection .entry.text, \"ax\"\n"
> -     ".global paravirt_ret0\n"
> -     "paravirt_ret0:\n\t"
> -     ASM_ENDBR
> -     "xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
> -     ASM_RET
> -     ".size paravirt_ret0, . - paravirt_ret0\n\t"
> -     ".type paravirt_ret0, @function\n\t"
> -     ".popsection");
> -
> +#define PV_RET0_ASM  "xor %" _ASM_AX ", %" _ASM_AX "\n\t"
> +DEFINE_PARAVIRT_ASM(paravirt_ret0, PV_RET0_ASM, .entry.text);
>  
>  void __init default_banner(void)
>  {
> 