__update_clear_spte_slow() should return the original spte, but the
current code returns the low half of the original spte combined with
the high half of the new spte.
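
For reference, a minimal userspace sketch (illustrative only, not the
kernel code itself; the union mirrors the split-SPTE layout used on
little-endian x86, and the kernel's xchg() is stood in for by a plain
assignment) showing why the chained assignment captures the new high
half instead of the original one:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the split 64-bit SPTE on 32-bit PAE hosts. */
union split_spte {
	struct {
		uint32_t spte_low;
		uint32_t spte_high;
	};
	uint64_t spte;
};

int main(void)
{
	union split_spte cur  = { .spte = 0x1111111122222222ULL }; /* current SPTE */
	union split_spte new_ = { .spte = 0x3333333344444444ULL }; /* value to install */
	union split_spte orig;

	/* Buggy pattern: the chained assignment writes the *new* high half
	 * into both cur and orig, so orig comes back as new-high | old-low. */
	orig.spte_low  = cur.spte_low;	/* stands in for the xchg() */
	cur.spte_low   = new_.spte_low;
	orig.spte_high = cur.spte_high = new_.spte_high;
	printf("buggy  orig = %016llx\n", (unsigned long long)orig.spte);

	/* Fixed pattern: save the original high half before overwriting it. */
	cur = (union split_spte){ .spte = 0x1111111122222222ULL };
	orig.spte_low  = cur.spte_low;	/* stands in for the xchg() */
	cur.spte_low   = new_.spte_low;
	orig.spte_high = cur.spte_high;	/* capture original high half */
	cur.spte_high  = new_.spte_high;	/* then install the new high half */
	printf("fixed  orig = %016llx\n", (unsigned long long)orig.spte);

	return 0;
}

Built with gcc, the buggy path prints 3333333322222222 while the fixed
path prints the full original value 1111111122222222.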

Signed-off-by: Zhao Jin <crono...@gmail.com>
---
 arch/x86/kvm/mmu.c |    3 ++-
 1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1c5b693..8e8da79 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -400,7 +400,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
 
        /* xchg acts as a barrier before the setting of the high bits */
        orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
-       orig.spte_high = ssptep->spte_high = sspte.spte_high;
+       orig.spte_high = ssptep->spte_high;
+       ssptep->spte_high = sspte.spte_high;
        count_spte_clear(sptep, spte);
 
        return orig.spte;
-- 
1.7.4.1
