Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
 arch/x86/kvm/mmu.c         |  115 +++++++++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/svm.c         |    6 ++
 arch/x86/kvm/vmx.c         |   24 ++++++++-
 arch/x86/kvm/x86.c         |    2 +-
 include/asm-x86/kvm_host.h |    3 +-
 5 files changed, 144 insertions(+), 6 deletions(-)
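
A standalone sketch for reference (illustrative only, not part of the patch): it shows the precedence rules guest_mtrr_type_lookup() applies when more than one variable-range MTRR covers the same address, per the SDM's MTRR precedence rules -- UC always wins, WT wins over WB, and any other conflict falls back to UC. The helper name combine_mtrr_types() is invented for the example.

#include <stdio.h>

#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRTHROUGH  4
#define MTRR_TYPE_WRBACK     6

/* Combine the types of two overlapping variable MTRR ranges. */
static unsigned char combine_mtrr_types(unsigned char prev, unsigned char curr)
{
        /* UC dominates every other type */
        if (prev == MTRR_TYPE_UNCACHABLE || curr == MTRR_TYPE_UNCACHABLE)
                return MTRR_TYPE_UNCACHABLE;

        /* WB combined with WT degrades to WT */
        if ((prev == MTRR_TYPE_WRBACK && curr == MTRR_TYPE_WRTHROUGH) ||
            (prev == MTRR_TYPE_WRTHROUGH && curr == MTRR_TYPE_WRBACK))
                return MTRR_TYPE_WRTHROUGH;

        /* any other mismatch is treated as UC, as in the lookup code */
        if (prev != curr)
                return MTRR_TYPE_UNCACHABLE;

        return prev;
}

int main(void)
{
        printf("WB+WT -> %d\n", combine_mtrr_types(MTRR_TYPE_WRBACK,
                                                   MTRR_TYPE_WRTHROUGH));  /* 4 */
        printf("WB+UC -> %d\n", combine_mtrr_types(MTRR_TYPE_WRBACK,
                                                   MTRR_TYPE_UNCACHABLE)); /* 0 */
        return 0;
}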

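A second illustrative snippet (again not part of the patch) shows what the new mt_mask handling in mmu_set_spte() boils down to on the EPT side: the guest type returned by get_memory_type() is shifted into the memory-type field of the EPT entry via vmx_get_mt_mask_shift(). On SVM the shift and the mt_mask passed to kvm_mmu_set_mask_ptes() are both 0, so the branch is never taken. The value 3 for VMX_EPT_MT_EPTE_SHIFT (bits 5:3 of the entry) is assumed from the existing EPT definitions.

#include <stdio.h>

#define MTRR_TYPE_WRBACK      6
#define VMX_EPT_MT_EPTE_SHIFT 3   /* EPT memory type field, bits 5:3 (assumed) */

int main(void)
{
        unsigned long long spte = 0;
        unsigned char guest_type = MTRR_TYPE_WRBACK; /* e.g. from get_memory_type() */

        /* mirrors: spte |= get_memory_type(vcpu, gfn) << get_mt_mask_shift() */
        spte |= (unsigned long long)guest_type << VMX_EPT_MT_EPTE_SHIFT;

        printf("memory type bits in the EPT entry: 0x%llx\n", spte); /* 0x30 */
        return 0;
}
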
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a87a11e..2cd772a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -33,6 +33,7 @@
 #include <asm/page.h>
 #include <asm/cmpxchg.h>
 #include <asm/io.h>
+#include <asm/mtrr.h>
 
 /*
  * When setting this variable to true it enables Two-Dimensional-Paging
@@ -159,6 +160,7 @@ static u64 __read_mostly shadow_x_mask;     /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
+static u64 __read_mostly shadow_mt_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -174,13 +176,14 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-               u64 dirty_mask, u64 nx_mask, u64 x_mask)
+               u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
 {
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
+       shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -1143,6 +1146,110 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
        return page;
 }
 
+/*
+ * This function is based on mtrr_type_lookup() in
+ * arch/x86/kernel/cpu/mtrr/generic.c
+ */
+static int guest_mtrr_type_lookup(struct kvm_vcpu *vcpu, u64 start, u64 end)
+{
+       int i;
+       u64 base, mask;
+       u8 prev_match, curr_match;
+       int num_var_ranges = KVM_NR_VAR_MTRR;
+       struct mtrr_state_type mtrr_state = vcpu->arch.mtrr_state;
+
+       if (!mtrr_state.enabled)
+               return 0xFF;
+
+       /* Make end inclusive, instead of exclusive */
+       end--;
+
+       /* Look in fixed ranges. Just return the type as per start */
+       if (mtrr_state.have_fixed && (start < 0x100000)) {
+               int idx;
+
+               if (start < 0x80000) {
+                       idx = 0;
+                       idx += (start >> 16);
+                       return mtrr_state.fixed_ranges[idx];
+               } else if (start < 0xC0000) {
+                       idx = 1 * 8;
+                       idx += ((start - 0x80000) >> 14);
+                       return mtrr_state.fixed_ranges[idx];
+               } else if (start < 0x1000000) {
+                       idx = 3 * 8;
+                       idx += ((start - 0xC0000) >> 12);
+                       return mtrr_state.fixed_ranges[idx];
+               }
+       }
+
+       /*
+        * Look in variable ranges
+        * Look for multiple ranges matching this address and pick type
+        * as per MTRR precedence
+        */
+       if (!(mtrr_state.enabled & 2))
+               return mtrr_state.def_type;
+
+       prev_match = 0xFF;
+       for (i = 0; i < num_var_ranges; ++i) {
+               unsigned short start_state, end_state;
+
+               if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
+                       continue;
+
+               base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
+                      (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
+               mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
+                      (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
+
+               start_state = ((start & mask) == (base & mask));
+               end_state = ((end & mask) == (base & mask));
+               if (start_state != end_state)
+                       return 0xFE;
+
+               if ((start & mask) != (base & mask))
+                       continue;
+
+               curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
+               if (prev_match == 0xFF) {
+                       prev_match = curr_match;
+                       continue;
+               }
+
+               if (prev_match == MTRR_TYPE_UNCACHABLE ||
+                   curr_match == MTRR_TYPE_UNCACHABLE)
+                       return MTRR_TYPE_UNCACHABLE;
+
+               if ((prev_match == MTRR_TYPE_WRBACK &&
+                    curr_match == MTRR_TYPE_WRTHROUGH) ||
+                   (prev_match == MTRR_TYPE_WRTHROUGH &&
+                    curr_match == MTRR_TYPE_WRBACK)) {
+                       prev_match = MTRR_TYPE_WRTHROUGH;
+                       curr_match = MTRR_TYPE_WRTHROUGH;
+               }
+
+               if (prev_match != curr_match)
+                       return MTRR_TYPE_UNCACHABLE;
+       }
+
+       if (prev_match != 0xFF)
+               return prev_match;
+
+       return mtrr_state.def_type;
+}
+
+static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       u8 mtrr;
+
+       mtrr = guest_mtrr_type_lookup(vcpu, gfn << PAGE_SHIFT,
+                      (gfn << PAGE_SHIFT) + PAGE_SIZE);
+       if (mtrr == 0xfe || mtrr == 0xff)
+               mtrr = MTRR_TYPE_WRBACK;
+       return mtrr;
+}
+
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
@@ -1152,6 +1259,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
        u64 spte;
        int was_rmapped = 0;
        int was_writeble = is_writeble_pte(*shadow_pte);
+       u64 mt_mask = shadow_mt_mask;
 
        pgprintk("%s: spte %llx access %x write_fault %d"
                 " user_fault %d gfn %lx\n",
@@ -1199,6 +1307,11 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                spte |= shadow_user_mask;
        if (largepage)
                spte |= PT_PAGE_SIZE_MASK;
+       if (mt_mask) {
+               mt_mask = get_memory_type(vcpu, gfn) <<
+                         kvm_x86_ops->get_mt_mask_shift();
+               spte |= mt_mask;
+       }
 
        spte |= (u64)pfn << PAGE_SHIFT;
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6f7f316..341f47e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1899,6 +1899,11 @@ static int get_npt_level(void)
 #endif
 }
 
+static int svm_get_mt_mask_shift(void)
+{
+       return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
@@ -1954,6 +1959,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 
        .set_tss_addr = svm_set_tss_addr,
        .get_tdp_level = get_npt_level,
+       .get_mt_mask_shift = svm_get_mt_mask_shift,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b74b43f..0a86ca3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1508,10 +1508,17 @@ static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
                *hw_cr4 &= ~X86_CR4_PAE;
 }
 
+static void vmx_mtrr_update(struct kvm_vcpu *vcpu)
+{
+       if (vm_need_ept())
+               kvm_mmu_reset_context(vcpu);
+}
+
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        unsigned long hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) |
                                KVM_VM_CR0_ALWAYS_ON;
+       unsigned long last_cr0 = vmcs_readl(CR0_READ_SHADOW);
 
        vmx_fpu_deactivate(vcpu);
 
@@ -1539,6 +1546,11 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
                vmx_fpu_activate(vcpu);
+
+       /* According to SDM 10.11.8, check whether we need to update MTRRs */
+       if ((last_cr0 & X86_CR0_CD) && !(last_cr0 & X86_CR0_NW) &&
+           !(cr0 & X86_CR0_CD) && !(cr0 & X86_CR0_NW))
+               vmx_mtrr_update(vcpu);
 }
 
 static u64 construct_eptp(unsigned long root_hpa)
@@ -3481,6 +3493,11 @@ static int get_ept_level(void)
        return VMX_EPT_DEFAULT_GAW + 1;
 }
 
+static int vmx_get_mt_mask_shift(void)
+{
+       return VMX_EPT_MT_EPTE_SHIFT;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
@@ -3536,6 +3553,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
        .set_tss_addr = vmx_set_tss_addr,
        .get_tdp_level = get_ept_level,
+       .get_mt_mask_shift = vmx_get_mt_mask_shift,
 };
 
 static int __init vmx_init(void)
@@ -3575,11 +3593,11 @@ static int __init vmx_init(void)
        if (vm_need_ept()) {
                bypass_guest_pf = 0;
                kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
-                       VMX_EPT_WRITABLE_MASK |
-                       VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+                       VMX_EPT_WRITABLE_MASK);
                kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
                                VMX_EPT_FAKE_DIRTY_MASK, 0ull,
-                               VMX_EPT_EXECUTABLE_MASK);
+                               VMX_EPT_EXECUTABLE_MASK,
+                               VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
                kvm_enable_tdp();
        } else
                kvm_disable_tdp();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5d7ea05..231d6fe 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2831,7 +2831,7 @@ int kvm_arch_init(void *opaque)
        kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
        kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
        kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
-                       PT_DIRTY_MASK, PT64_NX_MASK, 0);
+                       PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
        return 0;
 
 out:
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 2d65df2..6369f52 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -502,6 +502,7 @@ struct kvm_x86_ops {
 
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*get_tdp_level)(void);
+       int (*get_mt_mask_shift)(void);
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
@@ -515,7 +516,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-               u64 dirty_mask, u64 nx_mask, u64 x_mask);
+               u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-- 
1.5.4.5
