On Wed, Mar 13, 2013 at 05:53:45PM +0100, Jan Kiszka wrote:
> If we are in guest mode, L0 can only inject events into L2 if L1 has
> nothing pending. Otherwise, L0 would overwrite L1's events and they
> would get lost. This check is conceptually independent of
> nested_exit_on_intr.
> 
> If L1 traps external interrupts, then we also need to look at L1's
> idt_vectoring_info_field. If it is empty, we can kick the guest from L2
> to L1, just as the previous code did.
> 
> Finally, the logic for checking interrupts also has to be applied to
> NMIs in an analogous way. This enables NMI interception for nested
> guests.
> 
> Signed-off-by: Jan Kiszka <jan.kis...@siemens.com>
> ---
>  arch/x86/kvm/vmx.c |   59 ++++++++++++++++++++++++++++++++++++++++++++-------
>  1 files changed, 51 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index b50174d..10de336 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -4211,6 +4211,12 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
>               PIN_BASED_EXT_INTR_MASK;
>  }
>  
> +static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
> +{
> +     return get_vmcs12(vcpu)->pin_based_vm_exec_control &
> +             PIN_BASED_NMI_EXITING;
> +}
> +
It will take me some time to review this, but I have a small nit already:
you open-code the check of this bit in your previous patch, so why not
move this hunk there?
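Concretely (just to illustrate, assuming the open-coded test in the previous
patch is the same bit check as the helper body above), I mean introducing the
helper already in that patch:

        static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
        {
                return get_vmcs12(vcpu)->pin_based_vm_exec_control &
                        PIN_BASED_NMI_EXITING;
        }

so that the open-coded
get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING
test there turns into a nested_exit_on_nmi(vcpu) call, and this patch only
adds the new caller in vmx_nmi_allowed().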

>  static void enable_irq_window(struct kvm_vcpu *vcpu)
>  {
>       u32 cpu_based_vm_exec_control;
> @@ -4307,6 +4313,30 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
>  
>  static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
>  {
> +     if (is_guest_mode(vcpu)) {
> +             struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
> +
> +             if (to_vmx(vcpu)->nested.nested_run_pending &&
> +                 (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK))
> +                     return 0;
> +             if (nested_exit_on_nmi(vcpu)) {
> +                     /*
> +                      * Check if the idt_vectoring_info_field is free. We
> +                      * cannot raise EXIT_REASON_EXCEPTION_NMI if it isn't.
> +                      */
> +                     if (vmcs12->idt_vectoring_info_field &
> +                         VECTORING_INFO_VALID_MASK)
> +                             return 0;
> +                     nested_vmx_vmexit(vcpu);
> +                     vmcs12->vm_exit_reason = EXIT_REASON_EXCEPTION_NMI;
> +                     vmcs12->vm_exit_intr_info = NMI_VECTOR |
> +                             INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK;
> +                     /*
> +                      * fall through to normal code, but now in L1, not L2
> +                      */
> +             }
> +     }
> +
>       if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
>               return 0;
>  
> @@ -4346,16 +4376,29 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
>  
>  static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
>  {
> -     if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
> +     if (is_guest_mode(vcpu)) {
>               struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
> -             if (to_vmx(vcpu)->nested.nested_run_pending ||
> -                 (vmcs12->idt_vectoring_info_field &
> -                  VECTORING_INFO_VALID_MASK))
> +
> +             if (to_vmx(vcpu)->nested.nested_run_pending &&
> +                 (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK))
>                       return 0;
> -             nested_vmx_vmexit(vcpu);
> -             vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
> -             vmcs12->vm_exit_intr_info = 0;
> -             /* fall through to normal code, but now in L1, not L2 */
> +             if (nested_exit_on_intr(vcpu)) {
> +                     /*
> +                      * Check if the idt_vectoring_info_field is free. We
> +                      * cannot raise EXIT_REASON_EXTERNAL_INTERRUPT if it
> +                      * isn't.
> +                      */
> +                     if (vmcs12->idt_vectoring_info_field &
> +                         VECTORING_INFO_VALID_MASK)
> +                             return 0;
> +                     nested_vmx_vmexit(vcpu);
> +                     vmcs12->vm_exit_reason =
> +                             EXIT_REASON_EXTERNAL_INTERRUPT;
> +                     vmcs12->vm_exit_intr_info = 0;
> +                     /*
> +                      * fall through to normal code, but now in L1, not L2
> +                      */
> +             }
>       }
>  
>       return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
> -- 
> 1.7.3.4

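One more remark, just for context rather than as an issue with the patch:
nested_exit_on_nmi() simply tests whatever L1 programmed into its pin-based
VM-execution controls. An L1 hypervisor that wants NMIs intercepted while L2
runs would have done roughly the following (illustrative sketch only, written
with KVM-style accessor names; the VMWRITE gets trapped by L0 and ends up in
vmcs12->pin_based_vm_exec_control, which is what the helper reads):

        u32 pin_based = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);

        /* request a VM exit to L1 when an NMI arrives while L2 runs */
        pin_based |= PIN_BASED_NMI_EXITING;
        vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, pin_based);
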
--
                        Gleb.