AMD AVIC does not support ExtINT. Therefore, AVIC must be temporarily deactivated and KVM must fall back to legacy interrupt injection via vINTR and the interrupt window.
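The intended flow is roughly the following (illustrative sketch only, not part of the diff; it uses the helpers introduced below, and the real hooks live in enable_irq_window() and interrupt_window_interception()):

	/* enable_irq_window(): a pending ExtINT cannot be delivered by AVIC. */
	if (kvm_vcpu_apicv_active(vcpu))
		svm_request_deactivate_avic(&svm->vcpu);	/* fall back to vINTR */
	svm_set_vintr(svm);
	svm_inject_irq(svm, 0x0);

	/* interrupt_window_interception(): AVIC was disabled only to request
	 * the IRQ window, so re-enable it here. */
	if (svm_get_enable_apicv(&svm->vcpu))
		svm_request_activate_avic(&svm->vcpu);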
Introduce svm_request_activate/deactivate_avic() helper functions, which
handle the steps required to activate/deactivate AVIC.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpa...@amd.com>
---
 arch/x86/kvm/svm.c | 57 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 54 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f41f34f70dde..84116e689d5f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -391,6 +391,8 @@ static u8 rsm_ins_bytes[] = "\x0f\xaa";
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
+static void svm_request_activate_avic(struct kvm_vcpu *vcpu);
+static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu);
 
 static int nested_svm_exit_handled(struct vcpu_svm *svm);
 static int nested_svm_intercept(struct vcpu_svm *svm);
@@ -2109,6 +2111,9 @@ static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	if (!kvm_vcpu_apicv_active(vcpu))
+		return;
+
 	svm->avic_is_running = is_run;
 	if (is_run)
 		avic_vcpu_load(vcpu, vcpu->cpu);
@@ -2356,6 +2361,10 @@ static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
 
 static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
+	if (kvm_check_request(KVM_REQ_APICV_ACTIVATE, vcpu))
+		kvm_vcpu_activate_apicv(vcpu);
+	if (kvm_check_request(KVM_REQ_APICV_DEACTIVATE, vcpu))
+		kvm_vcpu_deactivate_apicv(vcpu);
 	avic_set_running(vcpu, true);
 }
 
@@ -4505,6 +4514,15 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 {
 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 	svm_clear_vintr(svm);
+
+	/*
+	 * For AVIC, the only reason to end up here is ExtINTs.
+	 * In this case AVIC was temporarily disabled for
+	 * requesting the IRQ window and we have to re-enable it.
+	 */
+	if (svm_get_enable_apicv(&svm->vcpu))
+		svm_request_activate_avic(&svm->vcpu);
+
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	mark_dirty(svm->vmcb, VMCB_INTR);
 	++svm->vcpu.stat.irq_window_exits;
@@ -5206,6 +5224,34 @@ static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
 {
 }
 
+static bool is_avic_active(struct vcpu_svm *svm)
+{
+	return (svm_get_enable_apicv(&svm->vcpu) &&
+		svm->vmcb->control.int_ctl & AVIC_ENABLE_MASK);
+}
+
+static void svm_request_activate_avic(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!lapic_in_kernel(vcpu) || is_avic_active(svm))
+		return;
+
+	avic_setup_access_page(vcpu, false);
+	kvm_make_apicv_activate_request(vcpu->kvm);
+}
+
+static void svm_request_deactivate_avic(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!lapic_in_kernel(vcpu) || !is_avic_active(svm))
+		return;
+
+	avic_destroy_access_page(vcpu);
+	kvm_make_apicv_deactivate_request(vcpu->kvm);
+}
+
 /* Note: Currently only used by Hyper-V. */
 static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
@@ -5493,9 +5539,6 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (kvm_vcpu_apicv_active(vcpu))
-		return;
-
 	/*
 	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
 	 * 1, because that's a separate STGI/VMRUN intercept. The next time we
@@ -5505,6 +5548,14 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 	 * window under the assumption that the hardware will set the GIF.
 	 */
 	if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
+		/*
+		 * IRQ window is not needed when AVIC is enabled,
+		 * unless we have pending ExtINT since it cannot be injected
+		 * via AVIC. In such case, we need to temporarily disable AVIC,
+		 * and fallback to injecting IRQ via V_IRQ.
+		 */
+		if (kvm_vcpu_apicv_active(vcpu))
+			svm_request_deactivate_avic(&svm->vcpu);
 		svm_set_vintr(svm);
 		svm_inject_irq(svm, 0x0);
 	}
-- 
2.17.1