GCC inline asm treats semicolons as instruction separators, so a
semicolon after the last instruction is redundant. Remove these
trailing semicolons.
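
A minimal sketch of the equivalence (illustration only, not part of
the patch; "flags" is a hypothetical local):

	unsigned long flags;

	/* With the trailing semicolon the assembler sees
	 * "pushf; pop %rax;", where the final ';' merely opens an
	 * empty statement. */
	asm volatile("pushf; pop %0;" : "=r" (flags));

	/* The same code is emitted without the redundant semicolon. */
	asm volatile("pushf; pop %0" : "=r" (flags));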

Signed-off-by: Uros Bizjak <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Acked-by: Alexey Makhalov <[email protected]>
Cc: Ajay Kaher <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
---
 arch/x86/include/asm/paravirt-spinlock.h |  4 ++--
 arch/x86/include/asm/paravirt.h          | 16 ++++++++--------
 arch/x86/include/asm/paravirt_types.h    |  2 +-
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/paravirt-spinlock.h b/arch/x86/include/asm/paravirt-spinlock.h
index a5011ef3a6cc..458b888aba84 100644
--- a/arch/x86/include/asm/paravirt-spinlock.h
+++ b/arch/x86/include/asm/paravirt-spinlock.h
@@ -38,14 +38,14 @@ static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
 static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
 {
        PVOP_ALT_VCALLEE1(pv_ops_lock, queued_spin_unlock, lock,
-                         "movb $0, (%%" _ASM_ARG1 ");",
+                         "movb $0, (%%" _ASM_ARG1 ")",
                          ALT_NOT(X86_FEATURE_PVUNLOCK));
 }
 
 static __always_inline bool pv_vcpu_is_preempted(long cpu)
 {
        return PVOP_ALT_CALLEE1(bool, pv_ops_lock, vcpu_is_preempted, cpu,
-                               "xor %%" _ASM_AX ", %%" _ASM_AX ";",
+                               "xor %%" _ASM_AX ", %%" _ASM_AX,
                                ALT_NOT(X86_FEATURE_VCPUPREEMPT));
 }
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index b21072af731d..3d0b92a8a557 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -117,7 +117,7 @@ static inline void write_cr0(unsigned long x)
 static __always_inline unsigned long read_cr2(void)
 {
        return PVOP_ALT_CALLEE0(unsigned long, pv_ops, mmu.read_cr2,
-                               "mov %%cr2, %%rax;", ALT_NOT_XEN);
+                               "mov %%cr2, %%rax", ALT_NOT_XEN);
 }
 
 static __always_inline void write_cr2(unsigned long x)
@@ -128,7 +128,7 @@ static __always_inline void write_cr2(unsigned long x)
 static inline unsigned long __read_cr3(void)
 {
        return PVOP_ALT_CALL0(unsigned long, pv_ops, mmu.read_cr3,
-                             "mov %%cr3, %%rax;", ALT_NOT_XEN);
+                             "mov %%cr3, %%rax", ALT_NOT_XEN);
 }
 
 static inline void write_cr3(unsigned long x)
@@ -516,18 +516,18 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 static __always_inline unsigned long arch_local_save_flags(void)
 {
-       return PVOP_ALT_CALLEE0(unsigned long, pv_ops, irq.save_fl, "pushf; pop %%rax;",
+       return PVOP_ALT_CALLEE0(unsigned long, pv_ops, irq.save_fl, "pushf; pop %%rax",
                                ALT_NOT_XEN);
 }
 
 static __always_inline void arch_local_irq_disable(void)
 {
-       PVOP_ALT_VCALLEE0(pv_ops, irq.irq_disable, "cli;", ALT_NOT_XEN);
+       PVOP_ALT_VCALLEE0(pv_ops, irq.irq_disable, "cli", ALT_NOT_XEN);
 }
 
 static __always_inline void arch_local_irq_enable(void)
 {
-       PVOP_ALT_VCALLEE0(pv_ops, irq.irq_enable, "sti;", ALT_NOT_XEN);
+       PVOP_ALT_VCALLEE0(pv_ops, irq.irq_enable, "sti", ALT_NOT_XEN);
 }
 
 static __always_inline unsigned long arch_local_irq_save(void)
@@ -553,9 +553,9 @@ static __always_inline unsigned long arch_local_irq_save(void)
        call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);
 .endm
 
-#define SAVE_FLAGS ALTERNATIVE_2 "PARA_IRQ_save_fl;",                  \
-                                "ALT_CALL_INSTR;", ALT_CALL_ALWAYS,    \
-                                "pushf; pop %rax;", ALT_NOT_XEN
+#define SAVE_FLAGS ALTERNATIVE_2 "PARA_IRQ_save_fl",                   \
+                                "ALT_CALL_INSTR", ALT_CALL_ALWAYS,     \
+                                "pushf; pop %rax", ALT_NOT_XEN
 #endif
 #endif /* CONFIG_PARAVIRT_XXL */
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7ccd41628d36..9bcf6bce88f6 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -210,7 +210,7 @@ extern struct paravirt_patch_template pv_ops;
  */
 #define PARAVIRT_CALL                                  \
        ANNOTATE_RETPOLINE_SAFE "\n\t"                  \
-       "call *%[paravirt_opptr];"
+       "call *%[paravirt_opptr]"
 
 /*
  * These macros are intended to wrap calls through one of the paravirt
-- 
2.52.0

