Change the recurring pattern

  leave_guest_mode(vcpu);
  vmx_load_vmcs01(vcpu);
  nested_vmx_entry_failure(vcpu, ...);
  return 1;

into

  return nested_vmx_entry_failure(vcpu, ...);

by moving leave_guest_mode() and vmx_load_vmcs01() into
nested_vmx_entry_failure() and having it return 1.

Signed-off-by: Radim Krčmář <[email protected]>
---
 arch/x86/kvm/vmx.c | 46 ++++++++++++++++------------------------------
 1 file changed, 16 insertions(+), 30 deletions(-)
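
For reference, a sketch of the consolidated helper and of a typical call
site after this patch, reconstructed from the hunks below (surrounding
kernel context elided):

  static int nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
                        struct vmcs12 *vmcs12,
                        u32 reason, unsigned long qualification)
  {
        /* cleanup that every caller used to duplicate */
        leave_guest_mode(vcpu);
        vmx_load_vmcs01(vcpu);
        load_vmcs12_host_state(vcpu, vmcs12);
        vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
        vmcs12->exit_qualification = qualification;
        nested_vmx_succeed(vcpu);
        if (enable_shadow_vmcs)
                to_vmx(vcpu)->nested.sync_shadow_vmcs = true;

        /* returning 1 lets callers collapse to a single return statement */
        return 1;
  }

A call site in nested_vmx_run() then shrinks to:

        if (vmcs12->vmcs_link_pointer != -1ull)
                return nested_vmx_entry_failure(vcpu, vmcs12,
                        EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);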

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 050899431b5e..a74cde40e349 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1398,7 +1398,7 @@ static inline bool is_nmi(u32 intr_info)
 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                              u32 exit_intr_info,
                              unsigned long exit_qualification);
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
+static int nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
                        struct vmcs12 *vmcs12,
                        u32 reason, unsigned long qualification);
 
@@ -10529,20 +10529,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        vmx_segment_cache_clear(vmx);
 
        if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
-           !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
-               leave_guest_mode(vcpu);
-               vmx_load_vmcs01(vcpu);
-               nested_vmx_entry_failure(vcpu, vmcs12,
+           !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
+               return nested_vmx_entry_failure(vcpu, vmcs12,
                        EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-               return 1;
-       }
-       if (vmcs12->vmcs_link_pointer != -1ull) {
-               leave_guest_mode(vcpu);
-               vmx_load_vmcs01(vcpu);
-               nested_vmx_entry_failure(vcpu, vmcs12,
+
+       if (vmcs12->vmcs_link_pointer != -1ull)
+               return nested_vmx_entry_failure(vcpu, vmcs12,
                        EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
-               return 1;
-       }
 
        /*
         * If the load IA32_EFER VM-entry control is 1, the following checks
@@ -10559,32 +10552,21 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                    ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
                    ((vmcs12->guest_cr0 & X86_CR0_PG) &&
                     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
-                       leave_guest_mode(vcpu);
-                       vmx_load_vmcs01(vcpu);
-                       nested_vmx_entry_failure(vcpu, vmcs12,
+                       return nested_vmx_entry_failure(vcpu, vmcs12,
                                EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-                       return 1;
                }
        }
 
-       if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification)) {
-               leave_guest_mode(vcpu);
-               vmx_load_vmcs01(vcpu);
-               nested_vmx_entry_failure(vcpu, vmcs12,
+       if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification))
+               return nested_vmx_entry_failure(vcpu, vmcs12,
                                EXIT_REASON_INVALID_STATE, exit_qualification);
-               return 1;
-       }
 
        msr_entry_idx = nested_vmx_load_msr(vcpu,
                                            vmcs12->vm_entry_msr_load_addr,
                                            vmcs12->vm_entry_msr_load_count);
-       if (msr_entry_idx) {
-               leave_guest_mode(vcpu);
-               vmx_load_vmcs01(vcpu);
-               nested_vmx_entry_failure(vcpu, vmcs12,
+       if (msr_entry_idx)
+               return nested_vmx_entry_failure(vcpu, vmcs12,
                                EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
-               return 1;
-       }
 
        vmcs12->launch_state = 1;
 
@@ -11173,16 +11155,20 @@ static void vmx_leave_nested(struct kvm_vcpu *vcpu)
  * It should only be called before L2 actually succeeded to run, and when
  * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
  */
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
+static int nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
                        struct vmcs12 *vmcs12,
                        u32 reason, unsigned long qualification)
 {
+       leave_guest_mode(vcpu);
+       vmx_load_vmcs01(vcpu);
        load_vmcs12_host_state(vcpu, vmcs12);
        vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
        vmcs12->exit_qualification = qualification;
        nested_vmx_succeed(vcpu);
        if (enable_shadow_vmcs)
                to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
+
+       return 1;
 }
 
 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
-- 
2.11.0
