Introduce a common function to abstract spte write-protection and
clean up the code
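
spte_write_protect() write-protects a single spte: for a last-level
spte it clears PT_WRITABLE_MASK, for a large spte it drops the mapping
and returns true so the caller knows the rmap iterator is now stale;
whether a TLB flush is needed is accumulated into *flush.  The caller
pattern (as used by __rmap_write_protect() below) is roughly:

	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
		if (spte_write_protect(kvm, sptep, level > PT_PAGE_TABLE_LEVEL,
				       &write_protected)) {
			/* the spte was dropped, restart from the rmap head */
			sptep = rmap_get_first(*rmapp, &iter);
			continue;
		}

		sptep = rmap_get_next(&iter);
	}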

Signed-off-by: Xiao Guangrong <xiaoguangr...@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c |   60 ++++++++++++++++++++++++++++++---------------------
 1 files changed, 35 insertions(+), 25 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7589e56..a1c3628 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1052,6 +1052,34 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
                rmap_remove(kvm, sptep);
 }

+/* Return true if the spte is dropped. */
+static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool large,
+                              bool *flush)
+{
+       u64 spte = *sptep;
+
+       if (!is_writable_pte(spte))
+               return false;
+
+       *flush = true;
+
+       if (large) {
+               pgprintk("rmap_write_protect(large): spte %p %llx\n",
+                        sptep, *sptep);
+               BUG_ON(!is_large_pte(spte));
+
+               drop_spte(kvm, sptep);
+               --kvm->stat.lpages;
+               return true;
+       }
+
+       rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+       spte = spte & ~PT_WRITABLE_MASK;
+       mmu_spte_update(sptep, spte);
+
+       return false;
+}
+
 static bool
 __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
 {
@@ -1060,24 +1088,13 @@ __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
        bool write_protected = false;

        for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
-               rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
-
-               if (!is_writable_pte(*sptep)) {
-                       sptep = rmap_get_next(&iter);
-                       continue;
-               }
-
-               if (level == PT_PAGE_TABLE_LEVEL) {
-                       mmu_spte_update(sptep, *sptep & ~PT_WRITABLE_MASK);
-                       sptep = rmap_get_next(&iter);
-               } else {
-                       BUG_ON(!is_large_pte(*sptep));
-                       drop_spte(kvm, sptep);
-                       --kvm->stat.lpages;
+               if (spte_write_protect(kvm, sptep, level > PT_PAGE_TABLE_LEVEL,
+                         &write_protected)) {
                        sptep = rmap_get_first(*rmapp, &iter);
+                       continue;
                }

-               write_protected = true;
+               sptep = rmap_get_next(&iter);
        }

        return write_protected;
@@ -3898,6 +3915,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
        struct kvm_mmu_page *sp;
+       bool flush = false;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
@@ -3912,16 +3930,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                              !is_last_spte(pt[i], sp->role.level))
                                continue;

-                       if (is_large_pte(pt[i])) {
-                               drop_spte(kvm, &pt[i]);
-                               --kvm->stat.lpages;
-                               continue;
-                       }
-
-                       /* avoid RMW */
-                       if (is_writable_pte(pt[i]))
-                               mmu_spte_update(&pt[i],
-                                               pt[i] & ~PT_WRITABLE_MASK);
+                       spte_write_protect(kvm, &pt[i],
+                                          is_large_pte(pt[i]), &flush);
                }
        }
        kvm_flush_remote_tlbs(kvm);
-- 
1.7.7.6
