Update spte before increasing tlbs_dirty to make sure no tlb flush
is lost after spte is zapped. This pairs with the barrier in
kvm_flush_remote_tlbs().

Signed-off-by: Lan Tianyu <tianyu....@intel.com>
---
 arch/x86/kvm/paging_tmpl.h | 11 +++++++++++
 1 file changed, 11 insertions(+)
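
Not part of the patch: below is a minimal user-space sketch of the ordering
the barrier pairing is meant to guarantee, using C11 atomics to stand in for
smp_wmb()/smp_mb(). The names (zap_side, flush_side, do_remote_flush) and the
sequential main() are illustrative assumptions, not KVM code.

/*
 * Simplified model of the ordering this patch relies on: the zap side
 * must publish the spte update before its tlbs_dirty increment can be
 * observed, and the flush side clears tlbs_dirty only if no new zap was
 * recorded after its snapshot, so no needed flush is lost.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long spte = 1;  /* stands in for sp->spt[i]      */
static _Atomic long tlbs_dirty;         /* stands in for kvm->tlbs_dirty */

static void do_remote_flush(void)
{
        /* placeholder for requesting a TLB flush on all vCPUs */
        printf("remote TLB flush\n");
}

/* Zap side, modelling FNAME(sync_page): zap the spte, then make that
 * store visible before the tlbs_dirty increment. */
static void zap_side(void)
{
        atomic_store_explicit(&spte, 0, memory_order_relaxed); /* drop_spte() */
        atomic_thread_fence(memory_order_release);             /* smp_wmb()   */
        atomic_fetch_add_explicit(&tlbs_dirty, 1, memory_order_relaxed);
}

/* Flush side, modelling kvm_flush_remote_tlbs(): snapshot tlbs_dirty,
 * flush, then clear the counter only if it did not change since the
 * snapshot. */
static void flush_side(void)
{
        long dirty = atomic_load_explicit(&tlbs_dirty, memory_order_relaxed);

        atomic_thread_fence(memory_order_seq_cst);             /* smp_mb()    */
        do_remote_flush();

        /* If a zap raced in after the snapshot, this fails and tlbs_dirty
         * stays non-zero, so a later flush is still forced. */
        atomic_compare_exchange_strong(&tlbs_dirty, &dirty, 0);
}

int main(void)
{
        /* Sequential calls only; a real demonstration would run the two
         * sides on separate threads. */
        zap_side();
        flush_side();
        return 0;
}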

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e159a81..d34475e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -949,6 +949,12 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                        return 0;
 
                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
+                       /*
+                        * Update spte before increasing tlbs_dirty to make sure
+                        * no tlb flush is lost after spte is zapped, see the
+                        * comments in kvm_flush_remote_tlbs().
+                        */
+                       smp_wmb();
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }
@@ -964,6 +970,11 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i]);
+                       /*
+                        * The same as above where we are doing
+                        * prefetch_invalid_gpte().
+                        */
+                       smp_wmb();
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }
-- 
1.8.4.rc0.1.g8f6a3e5.dirty
