Don't bother filling the gfn array cache when the caller is a fully
direct MMU, i.e. one that will never need a gfn array for shadow pages.

Reviewed-by: Ben Gardon <bgar...@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
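For context, the gfn array cache is one of the per-vCPU
kvm_mmu_memory_cache pools that mmu_topup_memory_caches() fills before
mmu_lock is taken, so that shadow MMU code never has to allocate while
holding the lock.  A rough sketch of that top-up pattern, using a
simplified stand-in structure rather than the real kvm_mmu_memory_cache
layout (the struct, field and helper names below are illustrative only,
not the actual kernel code):

	/*
	 * Illustrative sketch only: a simplified stand-in for the real
	 * kvm_mmu_memory_cache, not its actual layout.
	 */
	struct cache_sketch {
		int nobjs;
		void *objects[PT64_ROOT_MAX_LEVEL];
	};

	/* Preallocate up to @min objects while sleeping is still allowed. */
	static int topup_sketch(struct cache_sketch *mc, int min)
	{
		void *obj;

		while (mc->nobjs < min) {
			obj = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
			if (!obj)
				return -ENOMEM;
			mc->objects[mc->nobjs++] = obj;
		}
		return 0;
	}

With the new maybe_indirect parameter, a fully direct MMU skips topping
up the gfn array pool entirely while the other caches are filled as
before.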
 arch/x86/kvm/mmu/mmu.c         | 18 ++++++++++--------
 arch/x86/kvm/mmu/paging_tmpl.h |  4 ++--
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a8f8eebf67df..8d66cf558f1b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1101,7 +1101,7 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
        }
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
        int r;
 
@@ -1114,10 +1114,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
                                   PT64_ROOT_MAX_LEVEL);
        if (r)
                return r;
-       r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
-                                  PT64_ROOT_MAX_LEVEL);
-       if (r)
-               return r;
+       if (maybe_indirect) {
+               r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+                                          PT64_ROOT_MAX_LEVEL);
+               if (r)
+                       return r;
+       }
        return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                      PT64_ROOT_MAX_LEVEL);
 }
@@ -4107,7 +4109,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        if (fast_page_fault(vcpu, gpa, error_code))
                return RET_PF_RETRY;
 
-       r = mmu_topup_memory_caches(vcpu);
+       r = mmu_topup_memory_caches(vcpu, false);
        if (r)
                return r;
 
@@ -5147,7 +5149,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
        int r;
 
-       r = mmu_topup_memory_caches(vcpu);
+       r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
        if (r)
                goto out;
        r = mmu_alloc_roots(vcpu);
@@ -5341,7 +5343,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         * or not since pte prefetch is skiped if it does not have
         * enough objects in the cache.
         */
-       mmu_topup_memory_caches(vcpu);
+       mmu_topup_memory_caches(vcpu, true);
 
        spin_lock(&vcpu->kvm->mmu_lock);
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 3de32122f601..ac39710d0594 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -818,7 +818,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
                return RET_PF_EMULATE;
        }
 
-       r = mmu_topup_memory_caches(vcpu);
+       r = mmu_topup_memory_caches(vcpu, true);
        if (r)
                return r;
 
@@ -905,7 +905,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
         * No need to check return value here, rmap_can_add() can
         * help us to skip pte prefetch later.
         */
-       mmu_topup_memory_caches(vcpu);
+       mmu_topup_memory_caches(vcpu, true);
 
        if (!VALID_PAGE(root_hpa)) {
                WARN_ON(1);
-- 
2.26.0
