From: Ben Gardon <bgar...@google.com>

[ Upstream commit 1af4a96025b33587ca953c7ef12a1b20c6e70412 ]

Under certain conditions, some TDP MMU functions may not yield
reliably or frequently enough. For example, if a paging structure was
very large but had few, if any, writable entries, wrprot_gfn_range
could traverse many entries before finding a writable entry and
yielding, because the check for yielding only happens after an SPTE is
modified.

Fix this issue by moving the yield check to the beginning of the loop.
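
(Illustrative sketch, not part of the patch: the shape of the change in
wrprot_gfn_range, with unrelated details elided. With the old placement,
any iteration that hits a "continue" before modifying an SPTE never
reaches the resched call; with the new placement the check runs on every
iteration, and continuing after a yield is safe because nothing has been
modified yet for that entry.)

    /* Before: the yield is only reached after an SPTE has been written. */
    for_each_tdp_pte_min_level(iter, ...) {
            if (!is_shadow_present_pte(iter.old_spte) ||
                !is_last_spte(iter.old_spte, iter.level))
                    continue;               /* skipped entries never yield */

            tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
            spte_set = true;

            tdp_mmu_iter_cond_resched(kvm, &iter, false);
    }

    /* After: the yield check runs first, so even skipped entries yield. */
    for_each_tdp_pte_min_level(iter, ...) {
            if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
                    continue;               /* yielded; nothing modified yet */

            if (!is_shadow_present_pte(iter.old_spte) ||
                !is_last_spte(iter.old_spte, iter.level))
                    continue;

            tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
            spte_set = true;
    }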

Fixes: a6a0b05da9f3 ("kvm: x86/mmu: Support dirty logging for the TDP MMU")
Reviewed-by: Peter Feiner <pfei...@google.com>
Signed-off-by: Ben Gardon <bgar...@google.com>

Message-Id: <20210202185734.1680553-15-bgar...@google.com>
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index a07d37abb63f..0567286fba39 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -470,6 +470,12 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
        bool flush_needed = false;
 
        tdp_root_for_each_pte(iter, root, start, end) {
+               if (can_yield &&
+                   tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
+                       flush_needed = false;
+                       continue;
+               }
+
                if (!is_shadow_present_pte(iter.old_spte))
                        continue;
 
@@ -484,9 +490,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                        continue;
 
                tdp_mmu_set_spte(kvm, &iter, 0);
-
-               flush_needed = !(can_yield &&
-                                tdp_mmu_iter_cond_resched(kvm, &iter, true));
+               flush_needed = true;
        }
        return flush_needed;
 }
@@ -850,6 +854,9 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
        for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
                                   min_level, start, end) {
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+                       continue;
+
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
@@ -858,8 +865,6 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
                tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
                spte_set = true;
-
-               tdp_mmu_iter_cond_resched(kvm, &iter, false);
        }
        return spte_set;
 }
@@ -903,6 +908,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
        bool spte_set = false;
 
        tdp_root_for_each_leaf_pte(iter, root, start, end) {
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+                       continue;
+
                if (spte_ad_need_write_protect(iter.old_spte)) {
                        if (is_writable_pte(iter.old_spte))
                                new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
@@ -917,8 +925,6 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
                tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
                spte_set = true;
-
-               tdp_mmu_iter_cond_resched(kvm, &iter, false);
        }
        return spte_set;
 }
@@ -1026,6 +1032,9 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
        bool spte_set = false;
 
        tdp_root_for_each_pte(iter, root, start, end) {
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+                       continue;
+
                if (!is_shadow_present_pte(iter.old_spte))
                        continue;
 
@@ -1033,8 +1042,6 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
                tdp_mmu_set_spte(kvm, &iter, new_spte);
                spte_set = true;
-
-               tdp_mmu_iter_cond_resched(kvm, &iter, false);
        }
 
        return spte_set;
@@ -1075,6 +1082,11 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
        bool spte_set = false;
 
        tdp_root_for_each_pte(iter, root, start, end) {
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
+                       spte_set = false;
+                       continue;
+               }
+
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
@@ -1087,7 +1099,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 
                tdp_mmu_set_spte(kvm, &iter, 0);
 
-               spte_set = !tdp_mmu_iter_cond_resched(kvm, &iter, true);
+               spte_set = true;
        }
 
        if (spte_set)
-- 
2.30.2
