exc_page_fault() and exc_nmi() live in .noinstr.text, so every helper they
call must be __always_inline; several of them are plain 'static inline',
which the compiler is free to emit out of line, and objtool then complains
about the resulting calls leaving the section:

vmlinux.o: warning: objtool: exc_page_fault()+0x9: call to read_cr2() leaves .noinstr.text section
vmlinux.o: warning: objtool: exc_page_fault()+0x24: call to prefetchw() leaves .noinstr.text section
vmlinux.o: warning: objtool: exc_page_fault()+0x21: call to kvm_handle_async_pf.isra.0() leaves .noinstr.text section
vmlinux.o: warning: objtool: exc_nmi()+0x1cc: call to write_cr2() leaves .noinstr.text section
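
To make the failure mode concrete, a minimal userspace sketch (not the
kernel's real noinstr/__always_inline definitions, which carry additional
attributes; all example_* names are made up):

/* Sketch only: the real kernel noinstr also implies noinline, notrace, etc. */
#define noinstr		__attribute__((__section__(".noinstr.text")))
#define __always_inline	inline __attribute__((__always_inline__))

static inline void example_helper(void)		/* inlining is only a hint */
{
	asm volatile("" ::: "memory");
}

static __always_inline void example_helper_forced(void)	/* always inlined */
{
	asm volatile("" ::: "memory");
}

noinstr void example_exc(void)
{
	/* May compile to "call example_helper", i.e. a call from
	 * .noinstr.text into .text, which objtool flags. */
	example_helper();

	/* Body is always emitted in place; no call can escape. */
	example_helper_forced();
}

Built at -O0, or whenever the inlining heuristics decline, example_helper()
lands in .text and the call to it leaves .noinstr.text;
example_helper_forced() can never do that.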

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 arch/x86/include/asm/kvm_para.h      |    2 +-
 arch/x86/include/asm/processor.h     |    2 +-
 arch/x86/include/asm/special_insns.h |    8 ++++----
 3 files changed, 6 insertions(+), 6 deletions(-)

--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -141,7 +141,7 @@ static inline void kvm_disable_steal_tim
        return;
 }
 
-static inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
        return false;
 }
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -823,7 +823,7 @@ static inline void prefetch(const void *
  * Useful for spinlocks to avoid one state transition in the
  * cache coherency protocol:
  */
-static inline void prefetchw(const void *x)
+static __always_inline void prefetchw(const void *x)
 {
        alternative_input(BASE_PREFETCH, "prefetchw %P1",
                          X86_FEATURE_3DNOWPREFETCH,
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -28,14 +28,14 @@ static inline unsigned long native_read_
        return val;
 }
 
-static inline unsigned long native_read_cr2(void)
+static __always_inline unsigned long native_read_cr2(void)
 {
        unsigned long val;
        asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
 }
 
-static inline void native_write_cr2(unsigned long val)
+static __always_inline void native_write_cr2(unsigned long val)
 {
        asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
 }
@@ -160,12 +160,12 @@ static inline void write_cr0(unsigned lo
        native_write_cr0(x);
 }
 
-static inline unsigned long read_cr2(void)
+static __always_inline unsigned long read_cr2(void)
 {
        return native_read_cr2();
 }
 
-static inline void write_cr2(unsigned long x)
+static __always_inline void write_cr2(unsigned long x)
 {
        native_write_cr2(x);
 }
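
FWIW, the warnings above come out of objtool's noinstr validation of the
linked object; something like the following should reproduce them (the
exact flags are an assumption, check the tree's objtool):

  $ tools/objtool/objtool check --vmlinux --noinstr vmlinux.o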

