Add a helper to generate the mask of reserved PA bits in the host.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d462db3bc742..86af58294272 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4123,6 +4123,11 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
                                    vcpu->arch.reserved_gpa_bits, execonly);
 }
 
+static inline u64 reserved_hpa_bits(void)
+{
+       return rsvd_bits(shadow_phys_bits, 63);
+}
+
 /*
  * the page table on host is the shadow page table for the page
  * table in guest or amd nested guest, its mmu features completely
@@ -4142,7 +4147,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
         */
        shadow_zero_check = &context->shadow_zero_check;
        __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
-                               rsvd_bits(shadow_phys_bits, 63),
+                               reserved_hpa_bits(),
                                context->shadow_root_level, uses_nx,
                                guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
                                is_pse(vcpu), true);
@@ -4179,14 +4184,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 
        if (boot_cpu_is_amd())
                __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
-                                       rsvd_bits(shadow_phys_bits, 63),
+                                       reserved_hpa_bits(),
                                        context->shadow_root_level, false,
                                        boot_cpu_has(X86_FEATURE_GBPAGES),
                                        true, true);
        else
                __reset_rsvds_bits_mask_ept(shadow_zero_check,
-                                           rsvd_bits(shadow_phys_bits, 63),
-                                           false);
+                                           reserved_hpa_bits(), false);
 
        if (!shadow_me_mask)
                return;
@@ -4206,7 +4210,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
                                struct kvm_mmu *context, bool execonly)
 {
        __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
-                                   rsvd_bits(shadow_phys_bits, 63), execonly);
+                                   reserved_hpa_bits(), execonly);
 }
 
 #define BYTE_MASK(access) \
-- 
2.30.0.365.g02bc693789-goog

Reply via email to