If a shadow page's cpu mode differs from the current vcpu's, we had better
zap it, since the OS does not change cpu mode frequently.
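
For clarity, here is a minimal standalone sketch of the XOR-plus-mask check
used by detect_mismatch_sp() below. The union here is a simplified stand-in
for kvm_mmu_page_role (only the three mode-related bits are modelled), not
the real kernel definition:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for union kvm_mmu_page_role. */
    union fake_role {
            unsigned word;
            struct {
                    unsigned cr0_wp:1;
                    unsigned cr4_pae:1;
                    unsigned nxe:1;
            };
    };

    /* XOR the two role words and keep only the cpu-mode bits; any
     * surviving bit means the shadow page and the vcpu disagree. */
    static bool roles_mismatch(union fake_role sp_role, union fake_role vcpu_role)
    {
            union fake_role mask = { .word = 0 };

            mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
            return (sp_role.word ^ vcpu_role.word) & mask.word;
    }

    int main(void)
    {
            union fake_role sp = { .word = 0 }, vcpu = { .word = 0 };

            vcpu.nxe = 1;   /* vcpu enabled NX after the shadow page was built */
            printf("mismatch: %d\n", roles_mismatch(sp, vcpu));
            return 0;
    }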

Signed-off-by: Xiao Guangrong <xiaoguangr...@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |   26 +++++++++++++++++++-------
 1 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 931c23a..2328ee6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3603,6 +3603,18 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
        return misaligned;
 }
 
+/*
+ * The OS hardly changes cpu mode after boot, so we can zap a shadow page if
+ * its cpu mode is mismatched with the current vcpu's.
+ */
+static bool detect_mismatch_sp(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+       union kvm_mmu_page_role mask = { .word = 0 };
+
+       mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
+       return (sp->role.word ^ vcpu->arch.mmu.base_role.word) & mask.word;
+}
+
 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
 {
        unsigned page_offset, quadrant;
@@ -3638,13 +3650,12 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes, bool repeat_write)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
-       union kvm_mmu_page_role mask = { .word = 0 };
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
        u64 entry, gentry, *spte;
        int npte;
-       bool remote_flush, local_flush, zap_page, flooded, misaligned;
+       bool remote_flush, local_flush, zap_page, flooded;
 
        /*
         * If we don't have indirect shadow pages, it means no page is
@@ -3664,10 +3675,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
        flooded = detect_write_flooding(vcpu, gfn);
-       mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+               bool mismatch, misaligned;
+
                misaligned = detect_write_misaligned(sp, gpa, bytes);
-               if (misaligned || flooded || repeat_write) {
+               mismatch = detect_mismatch_sp(vcpu, sp);
+
+               if (misaligned || mismatch || flooded || repeat_write) {
                        zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
                                                     &invalid_list);
                        ++vcpu->kvm->stat.mmu_flooded;
@@ -3682,9 +3696,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                while (npte--) {
                        entry = *spte;
                        mmu_page_zap_pte(vcpu->kvm, sp, spte);
-                       if (gentry &&
-                             !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
-                             & mask.word) && get_free_pte_list_desc_nr(vcpu))
+                       if (gentry && get_free_pte_list_desc_nr(vcpu))
                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                        if (!remote_flush && need_remote_flush(entry, *spte))
                                remote_flush = true;
-- 
1.7.5.4
