Tell gcc that the VMXOFF instruction clobbers condition codes and memory when executed. Also, correct the original comments to remove kernel-doc syntax, per Randy Dunlap's request.
Suggested-by: Randy Dunlap <rdun...@infradead.org> Signed-off-by: David P. Reed <dpr...@deepplum.com> --- arch/x86/include/asm/virtext.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 9aad0e0876fb..0ede8d04535a 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -30,7 +30,7 @@ static inline int cpu_has_vmx(void) } -/** Disable VMX on the current CPU +/* Disable VMX on the current CPU * * vmxoff causes a undefined-opcode exception if vmxon was not run * on the CPU previously. Only call this function if you know VMX @@ -38,7 +38,7 @@ static inline int cpu_has_vmx(void) */ static inline void cpu_vmxoff(void) { - asm volatile ("vmxoff"); + asm volatile ("vmxoff" ::: "cc", "memory"); cr4_clear_bits(X86_CR4_VMXE); } @@ -47,7 +47,7 @@ static inline int cpu_vmx_enabled(void) return __read_cr4() & X86_CR4_VMXE; } -/** Disable VMX if it is enabled on the current CPU +/* Disable VMX if it is enabled on the current CPU * * You shouldn't call this if cpu_has_vmx() returns 0. */ @@ -57,7 +57,7 @@ static inline void __cpu_emergency_vmxoff(void) cpu_vmxoff(); } -/** Disable VMX if it is supported and enabled on the current CPU +/* Disable VMX if it is supported and enabled on the current CPU */ static inline void cpu_emergency_vmxoff(void) { -- 2.26.2