Enable direct guest access to the x2APIC EOI MSR to accelerate guests. This accelerates the handling of interrupts delivered directly to the guest from passed-through PCI devices. When a virtual IRQ is injected, this feature is disabled so that the subsequent EOI is routed to the virtual APIC; it is re-enabled once all pending virtual IRQs have been handled.
Signed-off-by: Tomoki Sekiyama <tomoki.sekiyama...@hitachi.com> Cc: Avi Kivity <a...@redhat.com> Cc: Marcelo Tosatti <mtosa...@redhat.com> Cc: Thomas Gleixner <t...@linutronix.de> Cc: Ingo Molnar <mi...@redhat.com> Cc: "H. Peter Anvin" <h...@zytor.com> --- arch/x86/kvm/vmx.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 files changed, 67 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 39a4cb4..f93e08c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -636,6 +636,10 @@ static unsigned long *vmx_io_bitmap_a; static unsigned long *vmx_io_bitmap_b; static unsigned long *vmx_msr_bitmap_legacy; static unsigned long *vmx_msr_bitmap_longmode; +#ifdef CONFIG_SLAVE_CPU +static unsigned long *vmx_msr_bitmap_slave_legacy; +static unsigned long *vmx_msr_bitmap_slave_longmode; +#endif static bool cpu_has_load_ia32_efer; static bool cpu_has_load_perf_global_ctrl; @@ -912,6 +916,11 @@ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification); +static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only); +#ifdef CONFIG_SLAVE_CPU +static void vmx_disable_intercept_for_msr_slave(u32 msr, bool longmode_only); +#endif + static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) { int i; @@ -1716,13 +1725,28 @@ static void vmx_set_direct_interrupt(struct kvm_vcpu *vcpu, bool enabled) #ifdef CONFIG_SLAVE_CPU void *msr_bitmap; - if (enabled) + if (enabled) { vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_EXT_INTR_MASK); - else + + if (cpu_has_vmx_msr_bitmap()) { + msr_bitmap = is_long_mode(vcpu) ? + vmx_msr_bitmap_slave_longmode : + vmx_msr_bitmap_slave_legacy; + vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); + } + } else { vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_EXT_INTR_MASK); + if (cpu_has_vmx_msr_bitmap()) { + msr_bitmap = is_long_mode(vcpu) ? 
+ vmx_msr_bitmap_longmode : + vmx_msr_bitmap_legacy; + vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); + } + } + trace_kvm_set_direct_interrupt(vcpu, enabled); #endif } @@ -3771,6 +3795,16 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr); } +#ifdef CONFIG_SLAVE_CPU +static void vmx_disable_intercept_for_msr_slave(u32 msr, bool longmode_only) +{ + if (!longmode_only) + __vmx_disable_intercept_for_msr(vmx_msr_bitmap_slave_legacy, + msr); + __vmx_disable_intercept_for_msr(vmx_msr_bitmap_slave_longmode, msr); +} +#endif + /* * Set up the vmcs's constant host-state fields, i.e., host-state fields that * will not change in the lifetime of the guest. @@ -7474,6 +7508,22 @@ static int __init vmx_init(void) goto out2; +#ifdef CONFIG_SLAVE_CPU + vmx_msr_bitmap_slave_legacy = + (unsigned long *)__get_free_page(GFP_KERNEL); + if (!vmx_msr_bitmap_slave_legacy) { + r = -ENOMEM; + goto out1s; + } + + vmx_msr_bitmap_slave_longmode = + (unsigned long *)__get_free_page(GFP_KERNEL); + if (!vmx_msr_bitmap_slave_longmode) { + r = -ENOMEM; + goto out2s; + } +#endif + /* * Allow direct access to the PC debug port (it is often used for I/O * delays, but the vmexits simply slow things down). @@ -7500,6 +7550,15 @@ static int __init vmx_init(void) vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); +#ifdef CONFIG_SLAVE_CPU + memcpy(vmx_msr_bitmap_slave_legacy, + vmx_msr_bitmap_legacy, PAGE_SIZE); + memcpy(vmx_msr_bitmap_slave_longmode, + vmx_msr_bitmap_longmode, PAGE_SIZE); + vmx_disable_intercept_for_msr_slave( + APIC_BASE_MSR + (APIC_EOI >> 4), false); +#endif + if (enable_ept) { kvm_mmu_set_mask_ptes(0ull, (enable_ept_ad_bits) ? 
VMX_EPT_ACCESS_BIT : 0ull, @@ -7513,6 +7572,12 @@ static int __init vmx_init(void) return 0; out3: +#ifdef CONFIG_SLAVE_CPU + free_page((unsigned long)vmx_msr_bitmap_slave_longmode); +out2s: + free_page((unsigned long)vmx_msr_bitmap_slave_legacy); +out1s: +#endif free_page((unsigned long)vmx_msr_bitmap_longmode); out2: free_page((unsigned long)vmx_msr_bitmap_legacy); -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html