In the shadow page table, only leaf SPs may be marked as unsync;
for non-leaf SPs, unsync_children is used to track the number of
unsynced children. In kvm_mmu_sync_roots(), sp->unsync is therefore
always zero for the root SP, so there is no need to check it.
Instead, add a warning inside mmu_sync_children() in case someone
incorrectly uses it.
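
For illustration only (not part of this patch), below is a minimal
standalone sketch of the invariant, using made-up names that do not
correspond to the real KVM structures: leaf SPs carry the unsync
flag, while their parents merely count unsynced children.

  /* Toy userspace model of the unsync invariant; all names here are
   * illustrative and hypothetical, not KVM code. */
  #include <assert.h>
  #include <stdio.h>

  #define TOY_PG_LEVEL_4K 1

  struct toy_sp {
          int level;                  /* page-table level of this SP        */
          int unsync;                 /* set only on 4K leaf SPs            */
          unsigned unsync_children;   /* count of unsynced children (non-leaf) */
          struct toy_sp *parent;      /* single parent, for simplicity      */
  };

  /* Mark a leaf SP unsync and bump the counter on every ancestor. */
  static void toy_mark_unsync(struct toy_sp *sp)
  {
          assert(sp->level == TOY_PG_LEVEL_4K);  /* only leaves go unsync */
          sp->unsync = 1;
          for (struct toy_sp *p = sp->parent; p; p = p->parent)
                  p->unsync_children++;
  }

  int main(void)
  {
          struct toy_sp root = { .level = 2 };   /* non-leaf SP */
          struct toy_sp leaf = { .level = TOY_PG_LEVEL_4K, .parent = &root };

          toy_mark_unsync(&leaf);
          /* The root itself is never unsync; it only counts children. */
          printf("root: unsync=%d unsync_children=%u\n",
                 root.unsync, root.unsync_children);
          return 0;
  }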

Also, clarify mmu_need_write_protect() by moving the warning into
kvm_unsync_page(), so the level check is enforced at the point where
sp->unsync is actually set.

Signed-off-by: Yu Zhang <yu.c.zh...@linux.intel.com>
Signed-off-by: Sean Christopherson <sea...@google.com>
---
Changes in V2:
- warnings added based on Sean's suggestion.

 arch/x86/kvm/mmu/mmu.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 86af582..c4797a00cc 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1995,6 +1995,12 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
        LIST_HEAD(invalid_list);
        bool flush = false;
 
+       /*
+        * Only 4k SPTEs can directly be made unsync, the parent pages
+        * should never be unsync'd.
+        */
+       WARN_ON_ONCE(sp->unsync);
+
        while (mmu_unsync_walk(parent, &pages)) {
                bool protected = false;
 
@@ -2502,6 +2508,8 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
 static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
+       WARN_ON(sp->role.level != PG_LEVEL_4K);
+
        trace_kvm_mmu_unsync_page(sp);
        ++vcpu->kvm->stat.mmu_unsync;
        sp->unsync = 1;
@@ -2524,7 +2532,6 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
                if (sp->unsync)
                        continue;
 
-               WARN_ON(sp->role.level != PG_LEVEL_4K);
                kvm_unsync_page(vcpu, sp);
        }
 
@@ -3406,8 +3413,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
                 * mmu_need_write_protect() describe what could go wrong if this
                 * requirement isn't satisfied.
                 */
-               if (!smp_load_acquire(&sp->unsync) &&
-                   !smp_load_acquire(&sp->unsync_children))
+               if (!smp_load_acquire(&sp->unsync_children))
                        return;
 
                write_lock(&vcpu->kvm->mmu_lock);
-- 
1.9.1
