There is a race condition in the pte invalidation code path: the "pte
already invalidated?" check is done before taking the mmu_lock, so two
invalidations of the same pte can both pass it and proceed. Move the
check (and the pfn release) under the spin lock to close the race.

Signed-off-by: Alexander Graf <ag...@suse.de>
---
 arch/powerpc/kvm/book3s_mmu_hpte.c |   14 ++++++++------
 1 files changed, 8 insertions(+), 6 deletions(-)
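
For illustration only (not part of the patch), here is a minimal
userspace sketch of the interleaving this closes, with a pthread mutex
standing in for vcpu->arch.mmu_lock. The names "hashed" and "releases"
are hypothetical stand-ins for the hlist linkage and the
kvm_release_pfn_*() calls:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool hashed = true;	/* stand-in for !hlist_unhashed() */
static int releases;		/* >1 models a double pfn release */

static void *invalidate(void *arg)
{
	bool check_under_lock = *(bool *)arg;

	/* old ordering: unlocked test, both threads can pass it */
	if (!check_under_lock && !hashed)
		return NULL;

	pthread_mutex_lock(&lock);

	/* new ordering: the loser of the race bails out here */
	if (check_under_lock && !hashed) {
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	hashed = false;		/* models the hlist_del_init_rcu() calls */
	releases++;		/* models kvm_release_pfn_{dirty,clean}() */

	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	bool check_under_lock = true;	/* flip to false for the old ordering */
	pthread_t a, b;

	pthread_create(&a, NULL, invalidate, &check_under_lock);
	pthread_create(&b, NULL, invalidate, &check_under_lock);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* always 1 with the check under the lock; can be 2 without it */
	printf("releases = %d\n", releases);
	return 0;
}

With the check under the lock the second invalidation sees the entry
already unhashed and returns early; with the old ordering both threads
can release the pfn.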

diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index bd6a767..79751d8 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -92,10 +92,6 @@ static void free_pte_rcu(struct rcu_head *head)
 
 static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
-       /* pte already invalidated? */
-       if (hlist_unhashed(&pte->list_pte))
-               return;
-
        trace_kvm_book3s_mmu_invalidate(pte);
 
        /* Different for 32 and 64 bit */
@@ -103,18 +99,24 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 
        spin_lock(&vcpu->arch.mmu_lock);
 
+       /* pte already invalidated in between? */
+       if (hlist_unhashed(&pte->list_pte)) {
+               spin_unlock(&vcpu->arch.mmu_lock);
+               return;
+       }
+
        hlist_del_init_rcu(&pte->list_pte);
        hlist_del_init_rcu(&pte->list_pte_long);
        hlist_del_init_rcu(&pte->list_vpte);
        hlist_del_init_rcu(&pte->list_vpte_long);
 
-       spin_unlock(&vcpu->arch.mmu_lock);
-
        if (pte->pte.may_write)
                kvm_release_pfn_dirty(pte->pfn);
        else
                kvm_release_pfn_clean(pte->pfn);
 
+       spin_unlock(&vcpu->arch.mmu_lock);
+
        vcpu->arch.hpte_cache_count--;
        call_rcu(&pte->rcu_head, free_pte_rcu);
 }
-- 
1.6.0.2
