From: Ben Gardon <bgar...@google.com>

[ Upstream commit e28a436ca4f65384cceaf3f4da0e00aa74244e6a ]

Currently, the TDP MMU yield / cond_resched functions either return
nothing or return true if the TLBs were not flushed. These semantics
are confusing, especially when making control-flow decisions in
calling functions.

To clean things up, change both functions to have the same
return value semantics as cond_resched: true if the thread yielded,
false if it did not. If the function yielded in the _flush_ version,
then the TLBs will have been flushed.
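
As an illustration of the new semantics, here is a condensed sketch of
the zap_gfn_range() caller after this patch (the iteration macro and
the skip logic in the loop body are paraphrased and not shown by the
hunk below):

	bool flush_needed = false;

	tdp_root_for_each_pte(iter, root, start, end) {
		/* ... skip SPTEs that do not need to be zapped ... */
		tdp_mmu_set_spte(kvm, &iter, 0);

		/*
		 * A true return means the helper yielded: the TLBs were
		 * flushed and the iterator was reset, so no flush is
		 * pending for the SPTEs zapped so far. A false return
		 * means the caller still owes a flush.
		 */
		flush_needed = !can_yield ||
			       !tdp_mmu_iter_flush_cond_resched(kvm, &iter);
	}
	return flush_needed;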

Reviewed-by: Peter Feiner <pfei...@google.com>
Acked-by: Paolo Bonzini <pbonz...@redhat.com>
Signed-off-by: Ben Gardon <bgar...@google.com>
Message-Id: <20210202185734.1680553-2-bgar...@google.com>
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 39 ++++++++++++++++++++++++++++----------
 1 file changed, 29 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 17976998bffb..abdd89771b9b 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -413,8 +413,15 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
                         _mmu->shadow_root_level, _start, _end)
 
 /*
- * Flush the TLB if the process should drop kvm->mmu_lock.
- * Return whether the caller still needs to flush the tlb.
+ * Flush the TLB and yield if the MMU lock is contended or this thread needs to
+ * return control to the scheduler.
+ *
+ * If this function yields, it will also reset the tdp_iter's walk over the
+ * paging structure and the calling function should allow the iterator to
+ * continue its traversal from the paging structure root.
+ *
+ * Return true if this function yielded, the TLBs were flushed, and the
+ * iterator's traversal was reset. Return false if a yield was not needed.
  */
 static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
 {
@@ -422,18 +429,32 @@ static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
                kvm_flush_remote_tlbs(kvm);
                cond_resched_lock(&kvm->mmu_lock);
                tdp_iter_refresh_walk(iter);
-               return false;
-       } else {
                return true;
        }
+
+       return false;
 }
 
-static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
+/*
+ * Yield if the MMU lock is contended or this thread needs to return control
+ * to the scheduler.
+ *
+ * If this function yields, it will also reset the tdp_iter's walk over the
+ * paging structure and the calling function should allow the iterator to
+ * continue its traversal from the paging structure root.
+ *
+ * Return true if this function yielded and the iterator's traversal was reset.
+ * Return false if a yield was not needed.
+ */
+static bool tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
 {
        if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                cond_resched_lock(&kvm->mmu_lock);
                tdp_iter_refresh_walk(iter);
+               return true;
        }
+
+       return false;
 }
 
 /*
@@ -469,10 +490,8 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
                tdp_mmu_set_spte(kvm, &iter, 0);
 
-               if (can_yield)
-                       flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
-               else
-                       flush_needed = true;
+               flush_needed = !can_yield ||
+                              !tdp_mmu_iter_flush_cond_resched(kvm, &iter);
        }
        return flush_needed;
 }
@@ -1073,7 +1092,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 
                tdp_mmu_set_spte(kvm, &iter, 0);
 
-               spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+               spte_set = !tdp_mmu_iter_flush_cond_resched(kvm, &iter);
        }
 
        if (spte_set)
-- 
2.30.2


