Reload remote vcpus' MMU from the GET_DIRTY_LOG codepath, before
deleting a pinned spte.

Signed-off-by: Marcelo Tosatti <mtosa...@redhat.com>

---
 arch/x86/kvm/mmu.c |   29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)
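
Note (illustration only, not part of the patch): the new skip_pinned
argument selects between two behaviours when spte_write_protect() meets
a pinned spte. The self-contained C model below restates that decision
outside the kernel; the *_stub helpers and the main() harness are made
up for the example and do not exist in KVM.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for mark_page_dirty() and mmu_reload_pinned_vcpus(). */
static void mark_page_dirty_stub(unsigned long gfn)
{
	printf("gfn %lu: spte left writable, page reported dirty again\n", gfn);
}

static void mmu_reload_pinned_vcpus_stub(void)
{
	printf("pinned vcpus: reload MMU before write access is dropped\n");
}

/*
 * Model of the pinned-spte branch added to spte_write_protect():
 * returns true if the caller may go ahead and write-protect the spte.
 */
static bool pinned_spte_check(bool pinned, bool skip_pinned, unsigned long gfn)
{
	if (!pinned)
		return true;

	if (skip_pinned) {
		/* dirty-log mask path: keep the pinned spte intact */
		mark_page_dirty_stub(gfn);
		return false;
	}

	/* every other write-protect path */
	mmu_reload_pinned_vcpus_stub();
	return true;
}

int main(void)
{
	pinned_spte_check(true, true, 42);	/* GET_DIRTY_LOG mask path */
	pinned_spte_check(true, false, 42);	/* e.g. slot remove-write-access */
	return 0;
}

Built as a plain userspace program this only prints which action each
path would take; in the patch itself the skip_pinned=true case is used
by the dirty-log mask path and the skip_pinned=false case by the
gfn/slot write-protect paths.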

Index: kvm.pinned-sptes/arch/x86/kvm/mmu.c
===================================================================
--- kvm.pinned-sptes.orig/arch/x86/kvm/mmu.c    2014-07-09 11:23:59.290744490 -0300
+++ kvm.pinned-sptes/arch/x86/kvm/mmu.c 2014-07-09 11:24:58.449632435 -0300
@@ -1208,7 +1208,8 @@
  *
  * Return true if tlb need be flushed.
  */
-static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
+static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect,
+                              bool skip_pinned)
 {
        u64 spte = *sptep;
 
@@ -1218,6 +1219,22 @@
 
        rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
 
+       if (is_pinned_spte(spte)) {
+               /* keep pinned spte intact, mark page dirty again */
+               if (skip_pinned) {
+                       struct kvm_mmu_page *sp;
+                       gfn_t gfn;
+
+                       sp = page_header(__pa(sptep));
+                       gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
+
+                       mark_page_dirty(kvm, gfn);
+                       return false;
+               } else {
+                       mmu_reload_pinned_vcpus(kvm);
+               }
+       }
+
        if (pt_protect)
                spte &= ~SPTE_MMU_WRITEABLE;
        spte = spte & ~PT_WRITABLE_MASK;
@@ -1226,7 +1243,7 @@
 }
 
 static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
-                                bool pt_protect)
+                                bool pt_protect, bool skip_pinned)
 {
        u64 *sptep;
        struct rmap_iterator iter;
@@ -1235,7 +1252,7 @@
        for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
                BUG_ON(!(*sptep & PT_PRESENT_MASK));
 
-               flush |= spte_write_protect(kvm, sptep, pt_protect);
+               flush |= spte_write_protect(kvm, sptep, pt_protect, skip_pinned);
                sptep = rmap_get_next(&iter);
        }
 
@@ -1261,7 +1278,7 @@
        while (mask) {
                rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
                                      PT_PAGE_TABLE_LEVEL, slot);
-               __rmap_write_protect(kvm, rmapp, false);
+               __rmap_write_protect(kvm, rmapp, false, true);
 
                /* clear the first set bit */
                mask &= mask - 1;
@@ -1280,7 +1297,7 @@
        for (i = PT_PAGE_TABLE_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                rmapp = __gfn_to_rmap(gfn, i, slot);
-               write_protected |= __rmap_write_protect(kvm, rmapp, true);
+               write_protected |= __rmap_write_protect(kvm, rmapp, true, false);
        }
 
        return write_protected;
@@ -4565,7 +4582,7 @@
 
                for (index = 0; index <= last_index; ++index, ++rmapp) {
                        if (*rmapp)
-                               __rmap_write_protect(kvm, rmapp, false);
+                               __rmap_write_protect(kvm, rmapp, false, false);
 
                        if (need_resched() || spin_needbreak(&kvm->mmu_lock))
                                cond_resched_lock(&kvm->mmu_lock);

