commit 15aaa819e20cb183f26392ed8ea16020630ef142 broke large page handling.
With large pages it is valid to enter mmu_set_spte() with a gfn different than what current shadow pte contains, when overwriting a PTE page pointer. Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 95c12bc..9cd75ee 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1047,25 +1047,24 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, write_fault, user_fault, gfn); if (is_rmap_pte(*shadow_pte)) { - if (page != spte_to_page(*shadow_pte)) - rmap_remove(vcpu->kvm, shadow_pte); - else - was_rmapped = 1; - } - - /* - * If we overwrite a PTE page pointer with a 2MB PMD, unlink - * the parent of the now unreachable PTE. - */ - if (largepage) { - if (was_rmapped && !is_large_pte(*shadow_pte)) { + /* + * If we overwrite a PTE page pointer with a 2MB PMD, unlink + * the parent of the now unreachable PTE. + */ + if (largepage && !is_large_pte(*shadow_pte)) { struct kvm_mmu_page *child; u64 pte = *shadow_pte; child = page_header(pte & PT64_BASE_ADDR_MASK); mmu_page_remove_parent_pte(child, shadow_pte); + } else if (page != spte_to_page(*shadow_pte)) + rmap_remove(vcpu->kvm, shadow_pte); + else { + if (largepage) + was_rmapped = is_large_pte(*shadow_pte); + else + was_rmapped = 1; + } - was_rmapped = is_large_pte(*shadow_pte); } /* ------------------------------------------------------------------------- Check out the new SourceForge.net Marketplace. It's the best place to buy or sell services for just about anything Open Source. http://ad.doubleclick.net/clk;164216239;13503038;w?http://sf.net/marketplace _______________________________________________ kvm-devel mailing list kvm-devel@lists.sourceforge.net https://lists.sourceforge.net/lists/listinfo/kvm-devel