This patch introduces native versions of the read/write_crX functions,
clts and wbinvd for x86_64, and patches callers where needed.

Signed-off-by: Glauber de Oliveira Costa <[EMAIL PROTECTED]>
Signed-off-by: Steven Rostedt <[EMAIL PROTECTED]>
Acked-by: Jeremy Fitzhardinge <[EMAIL PROTECTED]>
---
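
Note for reviewers: under CONFIG_PARAVIRT, asm/paravirt.h is expected to
route these names through an ops table rather than binding them directly
to the native implementations. A minimal sketch of that indirection
follows; the struct and field names are illustrative assumptions, not
code from this patch or from paravirt.h:

	/* Hypothetical ops table a hypervisor port could fill in. */
	struct cpu_ops_sketch {
		void (*clts)(void);
		unsigned long (*read_cr0)(void);
		void (*write_cr0)(unsigned long val);
		void (*wbinvd)(void);
	};

	/* Native boot: every hook points at the native_* helpers. */
	static struct cpu_ops_sketch cpu_ops_sketch = {
		.clts		= native_clts,
		.read_cr0	= native_read_cr0,
		.write_cr0	= native_write_cr0,
		.wbinvd		= native_wbinvd,
	};

	/* Under CONFIG_PARAVIRT, clts() would then resolve to something like: */
	static inline void clts_sketch(void)
	{
		cpu_ops_sketch.clts();
	}

A guest kernel would replace the table entries with hypercall-based
hooks, while the !CONFIG_PARAVIRT defines in the patch keep the native
helpers with no indirection.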
 arch/x86/mm/pageattr_64.c   |    3 +-
 include/asm-x86/system_64.h |   60 ++++++++++++++++++++++++++++++------------
 2 files changed, 45 insertions(+), 18 deletions(-)
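
The stts()/clts() pair is the main consumer of the cr0 hooks. A hedged
sketch of the usual caller pattern (illustrative only; the function name
is made up and this code is not part of the patch):

	static void example_fpu_section(void)
	{
		clts();	/* clear CR0.TS so FPU/SSE use does not trap */
		/* ... touch FPU/SSE state ... */
		stts();	/* set CR0.TS again: the next FPU use faults */
	}

Since stts() is defined as write_cr0(8 | read_cr0()), it picks up the
paravirtualized accessors automatically.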

diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index c40afba..59a52b0 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -12,6 +12,7 @@
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/io.h>
+#include <asm/paravirt.h>
 
 pte_t *lookup_address(unsigned long address)
 { 
@@ -77,7 +78,7 @@ static void flush_kernel_map(void *arg)
           much cheaper than WBINVD. */
        /* clflush is still broken. Disable for now. */
        if (1 || !cpu_has_clflush)
-               asm volatile("wbinvd" ::: "memory");
+               wbinvd();
        else list_for_each_entry(pg, l, lru) {
                void *adr = page_address(pg);
                clflush_cache_range(adr, PAGE_SIZE);
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 4cb2384..b558cb2 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -65,53 +65,62 @@ extern void load_gs_index(unsigned);
 /*
  * Clear and set 'TS' bit respectively
  */
-#define clts() __asm__ __volatile__ ("clts")
+static inline void native_clts(void)
+{
+       asm volatile ("clts");
+}
 
-static inline unsigned long read_cr0(void)
-{ 
+static inline unsigned long native_read_cr0(void)
+{
        unsigned long cr0;
        asm volatile("movq %%cr0,%0" : "=r" (cr0));
        return cr0;
 }
 
-static inline void write_cr0(unsigned long val) 
-{ 
+static inline void native_write_cr0(unsigned long val)
+{
        asm volatile("movq %0,%%cr0" :: "r" (val));
 }
 
-static inline unsigned long read_cr2(void)
+static inline unsigned long native_read_cr2(void)
 {
        unsigned long cr2;
        asm volatile("movq %%cr2,%0" : "=r" (cr2));
        return cr2;
 }
 
-static inline void write_cr2(unsigned long val)
+static inline void native_write_cr2(unsigned long val)
 {
        asm volatile("movq %0,%%cr2" :: "r" (val));
 }
 
-static inline unsigned long read_cr3(void)
-{ 
+static inline unsigned long native_read_cr3(void)
+{
        unsigned long cr3;
        asm volatile("movq %%cr3,%0" : "=r" (cr3));
        return cr3;
 }
 
-static inline void write_cr3(unsigned long val)
+static inline void native_write_cr3(unsigned long val)
 {
        asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
 }
 
-static inline unsigned long read_cr4(void)
-{ 
+static inline unsigned long native_read_cr4(void)
+{
        unsigned long cr4;
        asm volatile("movq %%cr4,%0" : "=r" (cr4));
        return cr4;
 }
 
-static inline void write_cr4(unsigned long val)
-{ 
+static inline unsigned long native_read_cr4_safe(void)
+{
+       /* CR4 always exists on x86-64 */
+       return native_read_cr4();
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
        asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
 }
 
@@ -127,10 +136,27 @@ static inline void write_cr8(unsigned long val)
        asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
 }
 
-#define stts() write_cr0(8 | read_cr0())
+static inline void native_wbinvd(void)
+{
+       asm volatile("wbinvd" ::: "memory");
+}
 
-#define wbinvd() \
-       __asm__ __volatile__ ("wbinvd": : :"memory")
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define clts           native_clts
+#define wbinvd         native_wbinvd
+#define read_cr0       native_read_cr0
+#define read_cr2       native_read_cr2
+#define read_cr3       native_read_cr3
+#define read_cr4       native_read_cr4
+#define write_cr0      native_write_cr0
+#define write_cr2      native_write_cr2
+#define write_cr3      native_write_cr3
+#define write_cr4      native_write_cr4
+#endif
+
+#define stts() write_cr0(8 | read_cr0())
 
 #endif /* __KERNEL__ */
 
-- 
1.4.4.2

