From: Andi Kleen <a...@linux.intel.com>

To add tracepoints to MSR accesses we need to include
linux/tracepoint.h. Unfortunately, doing that in asm/msr.h, which is
included all over the tree, causes hellish include loops. I tried to
fix several of them, but eventually gave up.
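
For context, once the accessors live out of line in msr.c, a
tracepoint can be added there without touching the header. A rough
sketch of what such an event could look like (not part of this patch;
the event name and fields are hypothetical):

  TRACE_EVENT(read_msr,
          TP_PROTO(unsigned int msr, u64 val),
          TP_ARGS(msr, val),
          TP_STRUCT__entry(
                  __field(unsigned int, msr)
                  __field(u64, val)
          ),
          TP_fast_assign(
                  __entry->msr = msr;
                  __entry->val = val;
          ),
          TP_printk("msr %x, value %llx",
                    __entry->msr, (unsigned long long)__entry->val)
  );

The out-of-line functions would then simply call the generated
trace_read_msr() hook.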

This patch moves the MSR functions out of line. An MSR access typically
takes 40-100 cycles or more, while a function call costs only a few
cycles, so the additional call overhead is insignificant.

Kernel text size is essentially neutral (it grows by only 24 bytes):
11852945        1671656 1822720 15347321         ea2e79 vmlinux-no-msr
11852969        1671656 1822720 15347345         ea2e91 vmlinux-msr

Another advantage is that we can now use the function_graph tracer
to see how expensive MSR accesses are.
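
For example, with the standard ftrace controls (illustrative, not part
of this patch):

  # cd /sys/kernel/debug/tracing
  # echo native_read_msr > set_graph_function
  # echo function_graph > current_tracer
  # cat trace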

Signed-off-by: Andi Kleen <a...@linux.intel.com>
---
 arch/x86/include/asm/msr.h | 51 ++++-----------------------------------------
 arch/x86/lib/msr.c         | 52 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+), 47 deletions(-)

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index de36f22..99d6864 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -57,53 +57,10 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
 #define EAX_EDX_RET(val, low, high)    "=A" (val)
 #endif
 
-static inline unsigned long long native_read_msr(unsigned int msr)
-{
-       DECLARE_ARGS(val, low, high);
-
-       asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
-       return EAX_EDX_VAL(val, low, high);
-}
-
-static inline unsigned long long native_read_msr_safe(unsigned int msr,
-                                                     int *err)
-{
-       DECLARE_ARGS(val, low, high);
-
-       asm volatile("2: rdmsr ; xor %[err],%[err]\n"
-                    "1:\n\t"
-                    ".section .fixup,\"ax\"\n\t"
-                    "3:  mov %[fault],%[err] ; jmp 1b\n\t"
-                    ".previous\n\t"
-                    _ASM_EXTABLE(2b, 3b)
-                    : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
-                    : "c" (msr), [fault] "i" (-EIO));
-       return EAX_EDX_VAL(val, low, high);
-}
-
-static inline void native_write_msr(unsigned int msr,
-                                   unsigned low, unsigned high)
-{
-       asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
-}
-
-/* Can be uninlined because referenced by paravirt */
-notrace static inline int native_write_msr_safe(unsigned int msr,
-                                       unsigned low, unsigned high)
-{
-       int err;
-       asm volatile("2: wrmsr ; xor %[err],%[err]\n"
-                    "1:\n\t"
-                    ".section .fixup,\"ax\"\n\t"
-                    "3:  mov %[fault],%[err] ; jmp 1b\n\t"
-                    ".previous\n\t"
-                    _ASM_EXTABLE(2b, 3b)
-                    : [err] "=a" (err)
-                    : "c" (msr), "0" (low), "d" (high),
-                      [fault] "i" (-EIO)
-                    : "memory");
-       return err;
-}
+extern unsigned long long native_read_msr(unsigned int msr);
+extern unsigned long long native_read_msr_safe(unsigned int msr, int *err);
+extern int native_write_msr_safe(unsigned int msr, unsigned low, unsigned high);
+extern void native_write_msr(unsigned int msr, unsigned low, unsigned high);
 
 extern unsigned long long native_read_tsc(void);
 
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 4362373..dc857b8 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -108,3 +108,55 @@ int msr_clear_bit(u32 msr, u8 bit)
 {
        return __flip_bit(msr, bit, false);
 }
+
+unsigned long long native_read_msr(unsigned int msr)
+{
+       DECLARE_ARGS(val, low, high);
+
+       asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+       return EAX_EDX_VAL(val, low, high);
+}
+EXPORT_SYMBOL(native_read_msr);
+
+unsigned long long native_read_msr_safe(unsigned int msr,
+                                        int *err)
+{
+       DECLARE_ARGS(val, low, high);
+
+       asm volatile("2: rdmsr ; xor %[err],%[err]\n"
+                    "1:\n\t"
+                    ".section .fixup,\"ax\"\n\t"
+                    "3:  mov %[fault],%[err] ; jmp 1b\n\t"
+                    ".previous\n\t"
+                    _ASM_EXTABLE(2b, 3b)
+                    : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+                    : "c" (msr), [fault] "i" (-EIO));
+       return EAX_EDX_VAL(val, low, high);
+}
+EXPORT_SYMBOL(native_read_msr_safe);
+
+void native_write_msr(unsigned int msr,
+                      unsigned low, unsigned high)
+{
+       asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+}
+EXPORT_SYMBOL(native_write_msr);
+
+/* Can be uninlined because referenced by paravirt */
+notrace int native_write_msr_safe(unsigned int msr,
+                                  unsigned low, unsigned high)
+{
+       int err;
+       asm volatile("2: wrmsr ; xor %[err],%[err]\n"
+                    "1:\n\t"
+                    ".section .fixup,\"ax\"\n\t"
+                    "3:  mov %[fault],%[err] ; jmp 1b\n\t"
+                    ".previous\n\t"
+                    _ASM_EXTABLE(2b, 3b)
+                    : [err] "=a" (err)
+                    : "c" (msr), "0" (low), "d" (high),
+                      [fault] "i" (-EIO)
+                    : "memory");
+       return err;
+}
+EXPORT_SYMBOL(native_write_msr_safe);
-- 
1.9.3
