mmu_page_zap_pte() is modified to mark the TLB as dirty itself, but currently
only FNAME(invlpg) takes advantage of this.

Signed-off-by: Avi Kivity <a...@redhat.com>
---
 arch/x86/kvm/mmu.c         |    7 +++----
 arch/x86/kvm/paging_tmpl.h |    7 +++----
 2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 03a9c80..0e1faec 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1895,7 +1895,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        }
 }
 
-static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
                             u64 *spte)
 {
        u64 pte;
@@ -1911,13 +1911,12 @@ static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        drop_parent_pte(child, spte);
                }
-               return true;
+               kvm_mark_tlb_dirty(kvm);
+               return;
        }
 
        if (is_mmio_spte(pte))
                mmu_spte_clear_no_track(spte);
-
-       return false;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9fe5f72..72c9cf4 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -697,10 +697,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                        pte_gpa = FNAME(get_level1_sp_gpa)(sp);
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-                       if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
-                               kvm_mark_tlb_dirty(vcpu->kvm);
-
-                       kvm_cond_flush_remote_tlbs(vcpu->kvm);
+                       mmu_page_zap_pte(vcpu->kvm, sp, sptep);
 
                        if (!rmap_can_add(vcpu))
                                break;
@@ -716,6 +713,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                        break;
        }
        spin_unlock(&vcpu->kvm->mmu_lock);
+
+       kvm_cond_flush_remote_tlbs(vcpu->kvm);
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
-- 
1.7.10.1

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to