If this bit is set, it means that the W bit of the spte has been cleared
due to shadow page table protection, not merely for dirty logging.
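
A spte can have its W bit cleared for two different reasons: dirty
logging, or protection of the shadow page tables themselves. Together
with the existing SPTE_ALLOW_WRITE bit, the new bit lets the two cases
be told apart from the spte alone. A rough sketch of the intended
decision (the patch implements this test as spte_wp_by_dirty_log()):

	if (!is_writable_pte(spte)) {
		if ((spte & SPTE_ALLOW_WRITE) &&
		    !(spte & SPTE_WRITE_PROTECT))
			/* write-protected only for dirty logging */;
		else
			/* write-protected for shadow page table protection */;
	}

A spte in the first case can safely be made writable again, while one
in the second case must stay write-protected.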

Signed-off-by: Xiao Guangrong <xiaoguangr...@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c |   56 ++++++++++++++++++++++++++++++++++-----------------
 1 files changed, 37 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dd984b6..eb02fc4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -147,6 +147,7 @@ module_param(dbg, bool, 0644);

 #define SPTE_HOST_WRITEABLE    (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
 #define SPTE_ALLOW_WRITE       (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
+#define SPTE_WRITE_PROTECT     (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 2))

 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

@@ -1042,36 +1043,51 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
                rmap_remove(kvm, sptep);
 }

+static bool spte_wp_by_dirty_log(u64 spte)
+{
+       WARN_ON(is_writable_pte(spte));
+
+       return (spte & SPTE_ALLOW_WRITE) && !(spte & SPTE_WRITE_PROTECT);
+}
+
 /* Return true if the spte is dropped. */
 static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool large,
-                              bool *flush)
+                              bool *flush, bool page_table_protect)
 {
        u64 spte = *sptep;

-       if (!is_writable_pte(spte))
-               return false;
+       if (is_writable_pte(spte)) {
+               *flush |= true;

-       *flush |= true;
+               if (large) {
+                       pgprintk("rmap_write_protect(large): spte %p %llx\n",
+                                sptep, *sptep);
+                       BUG_ON(!is_large_pte(spte));

-       if (large) {
-               pgprintk("rmap_write_protect(large): spte %p %llx\n",
-                        spte, *spte);
-               BUG_ON(!is_large_pte(spte));
+                       drop_spte(kvm, sptep);
+                       --kvm->stat.lpages;
+                       return true;
+               }

-               drop_spte(kvm, sptep);
-               --kvm->stat.lpages;
-               return true;
+               goto reset_spte;
        }

+       if (page_table_protect && spte_wp_by_dirty_log(spte))
+               goto reset_spte;
+
+       return false;
+
+reset_spte:
        rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
        spte = spte & ~PT_WRITABLE_MASK;
+       if (page_table_protect)
+               spte |= SPTE_WRITE_PROTECT;
        mmu_spte_update(sptep, spte);
-
        return false;
 }

-static bool
-__rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
+static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
+                                int level, bool page_table_protect)
 {
        u64 *sptep;
        struct rmap_iterator iter;
@@ -1080,7 +1096,7 @@ __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
        for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
                BUG_ON(!(*sptep & PT_PRESENT_MASK));
                if (spte_write_protect(kvm, sptep, level > PT_PAGE_TABLE_LEVEL,
-                         &write_protected)) {
+                         &write_protected, page_table_protect)) {
                        sptep = rmap_get_first(*rmapp, &iter);
                        continue;
                }
@@ -1109,7 +1125,7 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,

        while (mask) {
                rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
-               __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);
+               __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL, false);

                /* clear the first set bit */
                mask &= mask - 1;
@@ -1128,7 +1144,7 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
        for (i = PT_PAGE_TABLE_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                rmapp = __gfn_to_rmap(gfn, i, slot);
-               write_protected |= __rmap_write_protect(kvm, rmapp, i);
+               write_protected |= __rmap_write_protect(kvm, rmapp, i, true);
        }

        return write_protected;
@@ -1179,7 +1195,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
                        new_spte |= (u64)new_pfn << PAGE_SHIFT;

                        new_spte &= ~(PT_WRITABLE_MASK | SPTE_HOST_WRITEABLE |
-                                     shadow_accessed_mask | SPTE_ALLOW_WRITE);
+                                     shadow_accessed_mask | SPTE_ALLOW_WRITE |
+                                     SPTE_WRITE_PROTECT);

                        mmu_spte_clear_track_bits(sptep);
                        mmu_spte_set(sptep, new_spte);
@@ -2346,6 +2363,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        ret = 1;
                        pte_access &= ~ACC_WRITE_MASK;
                        spte &= ~PT_WRITABLE_MASK;
+                       spte |= SPTE_WRITE_PROTECT;
                }
        }

@@ -3934,7 +3952,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                                continue;

                        spte_write_protect(kvm, &pt[i],
-                                          is_large_pte(pt[i]), &flush);
+                                          is_large_pte(pt[i]), &flush, false);
                }
        }
        kvm_flush_remote_tlbs(kvm);
-- 
1.7.7.6
