Refactor the logic that parses an injected event into a separate
function.

The next patch will use this to handle the events that L1 wants to
inject into L2 in a way that survives migration.

Signed-off-by: Maxim Levitsky <mlevi...@redhat.com>
---
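Note for reviewers (not part of the commit message): a rough sketch of
how the next patch is expected to feed L1's injected event into this
helper, assuming the usual vmcb12 naming for L1's VMCB; the actual
call site may differ:

    svm_process_injected_event(svm, vmcb12->control.event_inj,
                               vmcb12->control.event_inj_err);
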
 arch/x86/kvm/svm/svm.c | 61 +++++++++++++++++++++++++++++++++++--------------------------
 arch/x86/kvm/svm/svm.h |  2 ++
 2 files changed, 37 insertions(+), 26 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 941e5251e13fe..01f1655d9e6f7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3584,38 +3584,25 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
        svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
 }
 
-static void svm_complete_interrupts(struct vcpu_svm *svm)
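+/*
+ * Re-queue an event that was injected into the guest but not yet
+ * delivered, so that it is injected again on the next VM entry.
+ */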
+void svm_process_injected_event(struct vcpu_svm *svm,
+                               u32 event,
+                               u32 event_err_code)
 {
-       u8 vector;
-       int type;
-       u32 exitintinfo = svm->vmcb->control.exit_int_info;
-       unsigned int3_injected = svm->int3_injected;
+       u8 vector = event & SVM_EXITINTINFO_VEC_MASK;
+       int type = event & SVM_EXITINTINFO_TYPE_MASK;
 
+       unsigned int int3_injected = svm->int3_injected;
        svm->int3_injected = 0;
 
-       /*
-        * If we've made progress since setting HF_IRET_MASK, we've
-        * executed an IRET and can allow NMI injection.
-        */
-       if ((svm->vcpu.arch.hflags & HF_IRET_MASK) &&
-           (sev_es_guest(svm->vcpu.kvm) ||
-            kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip)) {
-               svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
-               kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
-       }
-
-       svm->vcpu.arch.nmi_injected = false;
-       kvm_clear_exception_queue(&svm->vcpu);
-       kvm_clear_interrupt_queue(&svm->vcpu);
-
-       if (!(exitintinfo & SVM_EXITINTINFO_VALID))
+       if (!(event & SVM_EXITINTINFO_VALID))
                return;
 
        kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 
-       vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
-       type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
-
        switch (type) {
        case SVM_EXITINTINFO_TYPE_NMI:
                svm->vcpu.arch.nmi_injected = true;
@@ -3640,7 +3627,7 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
                                              int3_injected);
                        break;
                }
-               if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
-                       u32 err = svm->vmcb->control.exit_int_info_err;
+               if (event & SVM_EXITINTINFO_VALID_ERR) {
+                       u32 err = event_err_code;
                        kvm_requeue_exception_e(&svm->vcpu, vector, err);
 
@@ -3653,6 +3640,28 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
        default:
                break;
        }
+}
+
+static void svm_complete_interrupts(struct vcpu_svm *svm)
+{
+       /*
+        * If we've made progress since setting HF_IRET_MASK, we've
+        * executed an IRET and can allow NMI injection.
+        */
+       if ((svm->vcpu.arch.hflags & HF_IRET_MASK) &&
+           (sev_es_guest(svm->vcpu.kvm) ||
+            kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip)) {
+               svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
+               kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+       }
+
+       svm->vcpu.arch.nmi_injected = false;
+       kvm_clear_exception_queue(&svm->vcpu);
+       kvm_clear_interrupt_queue(&svm->vcpu);
+
+       svm_process_injected_event(svm,
+                                  svm->vmcb->control.exit_int_info,
+                                  svm->vmcb->control.exit_int_info_err);
 }
 
 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5431e6335e2e8..b5587650181f2 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -420,6 +420,8 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void svm_flush_tlb(struct kvm_vcpu *vcpu);
+void svm_process_injected_event(struct vcpu_svm *svm, u32 event,
+                               u32 event_err_code);
 void disable_nmi_singlestep(struct vcpu_svm *svm);
 bool svm_smi_blocked(struct kvm_vcpu *vcpu);
 bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
-- 
2.26.2
