The pdptrs need to be cached per cr3 cache entry, in addition to the shadowed
root tables, so that the guest page table walk can be done properly.
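
Not part of the patch; for illustration only, a minimal standalone sketch of the
indexing scheme (simplified stand-in types, assumed KVM_CR3_CACHE_SIZE value, not
the real kernel structs) showing how a per-cr3-cache-slot pdptr array is consulted
during a PAE guest walk:

	#include <stdint.h>

	#define KVM_CR3_CACHE_SIZE 4        /* assumed size for the sketch */
	#define PT_PRESENT_MASK    1ULL

	/* Simplified stand-in for the relevant kvm_vcpu_arch fields. */
	struct pdptr_cache {
		unsigned int cr3_cache_idx;             /* slot of the currently loaded cr3 */
		unsigned int cr3_cache_limit;           /* number of valid slots */
		uint64_t pdptrs[KVM_CR3_CACHE_SIZE][4]; /* one pdptr set per cached cr3 */
	};

	/* Pick the pdpte covering a 32-bit PAE address from the active slot,
	 * mirroring the walker change: pdptrs[cr3_cache_idx][(addr >> 30) & 3].
	 * Returns 0 if the entry is not present (walker would report not_present). */
	static int walk_pdpte(const struct pdptr_cache *c, uint32_t addr, uint64_t *pdpte)
	{
		*pdpte = c->pdptrs[c->cr3_cache_idx][(addr >> 30) & 3];
		return (*pdpte & PT_PRESENT_MASK) != 0;
	}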

Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]>

Index: kvm.first/arch/x86/kvm/mmu.c
===================================================================
--- kvm.first.orig/arch/x86/kvm/mmu.c
+++ kvm.first/arch/x86/kvm/mmu.c
@@ -1320,11 +1320,11 @@ static void mmu_alloc_roots(struct kvm_v
 
                ASSERT(!VALID_PAGE(root));
                if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
-                       if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+                       if (!is_present_pte(vcpu->arch.pdptrs[j][i])) {
                                vcpu->arch.mmu.pae_root[j][i] = 0;
                                continue;
                        }
-                       root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+                       root_gfn = vcpu->arch.pdptrs[j][i] >> PAGE_SHIFT;
                } else if (vcpu->arch.mmu.root_level == 0)
                        root_gfn = 0;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
Index: kvm.first/arch/x86/kvm/paging_tmpl.h
===================================================================
--- kvm.first.orig/arch/x86/kvm/paging_tmpl.h
+++ kvm.first/arch/x86/kvm/paging_tmpl.h
@@ -136,7 +136,8 @@ walk:
        pte = vcpu->arch.cr3;
 #if PTTYPE == 64
        if (!is_long_mode(vcpu)) {
-               pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
+               pte = vcpu->arch.pdptrs[vcpu->arch.cr3_cache_idx]
+                                      [(addr >> 30) & 3];
                if (!is_present_pte(pte))
                        goto not_present;
                --walker->level;
Index: kvm.first/arch/x86/kvm/x86.c
===================================================================
--- kvm.first.orig/arch/x86/kvm/x86.c
+++ kvm.first/arch/x86/kvm/x86.c
@@ -192,13 +192,21 @@ static void __queue_exception(struct kvm
 /*
  * Load the pae pdptrs.  Return true is they are all valid.
  */
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3, int cr3_cache_inc)
 {
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
-       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs[0])];
+       int idx = vcpu->arch.cr3_cache_idx;
+
+       idx++;
+       if (unlikely(idx >= vcpu->arch.cr3_cache_limit))
+               idx = 0;
+
+       if (cr3_cache_inc)
+               vcpu->arch.cr3_cache_idx = idx;
 
        down_read(&vcpu->kvm->slots_lock);
        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
@@ -215,7 +223,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, u
        }
        ret = 1;
 
-       memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
+       memcpy(vcpu->arch.pdptrs[idx], pdpte, sizeof(vcpu->arch.pdptrs[0]));
 out:
        up_read(&vcpu->kvm->slots_lock);
 
@@ -225,7 +233,7 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
 
 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
-       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs[0])];
        bool changed = true;
        int r;
 
@@ -236,7 +244,8 @@ static bool pdptrs_changed(struct kvm_vc
        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
-       changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
+       changed = memcmp(pdpte, vcpu->arch.pdptrs[vcpu->arch.cr3_cache_idx],
+                        sizeof(pdpte)) != 0;
 out:
        up_read(&vcpu->kvm->slots_lock);
 
@@ -286,7 +295,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, 
                        }
                } else
 #endif
-               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
+               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3, 1)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
@@ -325,7 +334,7 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, 
                        return;
                }
        } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
-                  && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
+                  && !load_pdptrs(vcpu, vcpu->arch.cr3, 1)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
@@ -363,7 +372,7 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, 
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
-                       if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
+                       if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3, 0)) {
                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                       "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
@@ -3047,7 +3056,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct
        mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
-               load_pdptrs(vcpu, vcpu->arch.cr3);
+               load_pdptrs(vcpu, vcpu->arch.cr3, 1);
 
        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);
Index: kvm.first/include/asm-x86/kvm_host.h
===================================================================
--- kvm.first.orig/include/asm-x86/kvm_host.h
+++ kvm.first/include/asm-x86/kvm_host.h
@@ -211,7 +211,7 @@ struct kvm_vcpu_arch {
        unsigned int cr3_cache_limit;
        unsigned long cr4;
        unsigned long cr8;
-       u64 pdptrs[4]; /* pae */
+       u64 pdptrs[KVM_CR3_CACHE_SIZE][4]; /* pae */
        u64 shadow_efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
@@ -433,7 +433,7 @@ void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3, int cr3_cache_inc);
 
 int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                          const void *val, int bytes);
@@ -526,7 +526,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *
 
 void kvm_enable_tdp(void);
 
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
 
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
Index: kvm.first/arch/x86/kvm/svm.c
===================================================================
--- kvm.first.orig/arch/x86/kvm/svm.c
+++ kvm.first/arch/x86/kvm/svm.c
@@ -1394,7 +1394,7 @@ static int handle_exit(struct kvm_run *k
                vcpu->arch.cr0 = svm->vmcb->save.cr0;
                vcpu->arch.cr3 = svm->vmcb->save.cr3;
                if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-                       if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
+                       if (!load_pdptrs(vcpu, vcpu->arch.cr3, 1)) {
                                kvm_inject_gp(vcpu, 0);
                                return 1;
                        }

-- 

