The is_rmap_spte() check and the accessed/dirty pfn bookkeeping need to
become atomic with the spte update, so move them out of rmap_remove()
and into drop_spte(), the place that sets the new spte.

Signed-off-by: Avi Kivity <a...@redhat.com>
---
 arch/x86/kvm/mmu.c |   17 +++++++++--------
 1 files changed, 9 insertions(+), 8 deletions(-)
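
Note (below the cut, not part of the commit message): with the check and
the write now colocated in drop_spte(), a follow-up can make the two
genuinely atomic by replacing the read-then-__set_spte() sequence with an
atomic exchange of the spte.  A rough sketch of what that could look
like; the helper name and the direct use of xchg() are illustrative only,
not something this patch introduces:

	static void drop_spte_atomic(struct kvm *kvm, u64 *sptep, u64 new_spte)
	{
		/* Atomically fetch the old spte while installing the new one. */
		u64 old_spte = xchg(sptep, new_spte);

		if (!is_rmap_spte(old_spte))
			return;
		/*
		 * Harvest accessed/dirty state from the value we actually
		 * removed, so concurrent hardware A/D updates are not lost.
		 */
		if (old_spte & shadow_accessed_mask)
			kvm_set_pfn_accessed(spte_to_pfn(old_spte));
		if (is_writable_pte(old_spte))
			kvm_set_pfn_dirty(spte_to_pfn(old_spte));
		/*
		 * After this patch, rmap_remove() only uses the spte's
		 * address, not its value, so it is safe to call once the
		 * spte has already been replaced.
		 */
		rmap_remove(kvm, sptep);
	}
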

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 17331c2..f93948d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -620,19 +620,11 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *sp;
-       pfn_t pfn;
        gfn_t gfn;
        unsigned long *rmapp;
        int i;
 
-       if (!is_rmap_spte(*spte))
-               return;
        sp = page_header(__pa(spte));
-       pfn = spte_to_pfn(*spte);
-       if (*spte & shadow_accessed_mask)
-               kvm_set_pfn_accessed(pfn);
-       if (is_writable_pte(*spte))
-               kvm_set_pfn_dirty(pfn);
        gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
        rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
        if (!*rmapp) {
@@ -668,6 +660,15 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
+       pfn_t pfn;
+
+       if (!is_rmap_spte(*sptep))
+               return;
+       pfn = spte_to_pfn(*sptep);
+       if (*sptep & shadow_accessed_mask)
+               kvm_set_pfn_accessed(pfn);
+       if (is_writable_pte(*sptep))
+               kvm_set_pfn_dirty(pfn);
        rmap_remove(kvm, sptep);
        __set_spte(sptep, new_spte);
 }
-- 
1.7.1
