Replace a variety of open coded GPA checks with the recently introduced
common helpers.

No functional change intended.

Signed-off-by: Sean Christopherson <sea...@google.com>
---
 arch/x86/kvm/vmx/nested.c | 26 +++++++-------------------
 arch/x86/kvm/vmx/vmx.c    |  2 +-
 2 files changed, 8 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b14fc19ceb36..b25ce704a2aa 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -775,8 +775,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
           (CC(!nested_cpu_has_vid(vmcs12)) ||
            CC(!nested_exit_intr_ack_set(vcpu)) ||
            CC((vmcs12->posted_intr_nv & 0xff00)) ||
-           CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
-           CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
+           CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
                return -EINVAL;
 
        /* tpr shadow is needed by all apicv features. */
@@ -789,13 +788,11 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
                                       u32 count, u64 addr)
 {
-       int maxphyaddr;
-
        if (count == 0)
                return 0;
-       maxphyaddr = cpuid_maxphyaddr(vcpu);
-       if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
-           (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
+
+       if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
+           !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
                return -EINVAL;
 
        return 0;
@@ -1093,14 +1090,6 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
        }
 }
 
-static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
-       unsigned long invalid_mask;
-
-       invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
-       return (val & invalid_mask) == 0;
-}
-
 /*
  * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit.
  * tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't
@@ -1152,7 +1141,7 @@ static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu)
 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
                               enum vm_entry_failure_code *entry_failure_code)
 {
-       if (CC(!nested_cr3_valid(vcpu, cr3))) {
+       if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
                *entry_failure_code = ENTRY_FAIL_DEFAULT;
                return -EINVAL;
        }
@@ -2666,7 +2655,6 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
        /* Check for memory type validity */
        switch (new_eptp & VMX_EPTP_MT_MASK) {
@@ -2697,7 +2685,7 @@ static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
        }
 
        /* Reserved bits should not be set */
-       if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f)))
+       if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
                return false;
 
        /* AD, if set, should be supported */
@@ -2881,7 +2869,7 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 
        if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
            CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
-           CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
+           CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
                return -EINVAL;
 
        if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index beb5a912014d..cbeb0748f25f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1114,7 +1114,7 @@ static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
 static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
 {
        /* The base must be 128-byte aligned and a legal physical address. */
-       return !kvm_vcpu_is_illegal_gpa(vcpu, base) && !(base & 0x7f);
+       return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
 }
 
 static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
-- 
2.30.0.365.g02bc693789-goog

Reply via email to