Re: [PATCH v2 3/3] KVM: Non-atomic interrupt injection

2010-07-21 Thread Avi Kivity

On 07/21/2010 07:27 PM, Marcelo Tosatti wrote:
>>> Also should undo vmx.rmode.* ?
>>
>> Elaborate?
>
> Undo vmx.rmode assignments on cancel_injection.


Hm.  Doesn't vmx_complete_interrupts() have to do that anyway if an 
injection fails?


Ah:

vmx_vcpu_run()
{
	...
	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	if (vmx->rmode.irq.pending)
		fixup_rmode_irq(vmx);

	...

	vmx_complete_interrupts(vmx);

So I'll just move that bit into vmx_complete_interrupts.  Good catch.
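
(A rough sketch of that move, for illustration only, not the committed change;
it assumes fixup_rmode_irq() keeps its current signature and is visible at this
point in the file, and that vmx->idt_vectoring_info has already been read back
from the VMCS in vmx_vcpu_run():)

static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
{
	/* Real-mode injection is emulated, so rewrite the vectoring info
	 * here, right before it is consumed, instead of in vmx_vcpu_run().
	 */
	if (vmx->rmode.irq.pending)
		fixup_rmode_irq(vmx);

	__vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
				  VM_EXIT_INSTRUCTION_LEN,
				  IDT_VECTORING_ERROR_CODE);
}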

--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.



Re: [PATCH v2 3/3] KVM: Non-atomic interrupt injection

2010-07-21 Thread Marcelo Tosatti
On Wed, Jul 21, 2010 at 08:37:26AM +0300, Avi Kivity wrote:
> On 07/21/2010 03:55 AM, Marcelo Tosatti wrote:
> >
> >>--- a/arch/x86/kvm/x86.c
> >>+++ b/arch/x86/kvm/x86.c
> >>@@ -4709,6 +4709,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> >>if (unlikely(r))
> >>goto out;
> >>
> >>+   inject_pending_event(vcpu);
> >>+
> >>+   /* enable NMI/IRQ window open exits if needed */
> >>+   if (vcpu->arch.nmi_pending)
> >>+   kvm_x86_ops->enable_nmi_window(vcpu);
> >>+   else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
> >>+   kvm_x86_ops->enable_irq_window(vcpu);
> >>+
> >>+   if (kvm_lapic_enabled(vcpu)) {
> >>+   update_cr8_intercept(vcpu);
> >>+   kvm_lapic_sync_to_vapic(vcpu);
> >>+   }
> >>+
> >>preempt_disable();
> >>
> >>kvm_x86_ops->prepare_guest_switch(vcpu);
> >>@@ -4727,23 +4740,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> >>smp_wmb();
> >>local_irq_enable();
> >>preempt_enable();
> >>+   kvm_x86_ops->cancel_injection(vcpu);
> >>r = 1;
> >>goto out;
> >>}
> >>
> >>-   inject_pending_event(vcpu);
> >>-
> >>-   /* enable NMI/IRQ window open exits if needed */
> >>-   if (vcpu->arch.nmi_pending)
> >>-   kvm_x86_ops->enable_nmi_window(vcpu);
> >>-   else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
> >>-   kvm_x86_ops->enable_irq_window(vcpu);
> >>-
> >>-   if (kvm_lapic_enabled(vcpu)) {
> >>-   update_cr8_intercept(vcpu);
> >>-   kvm_lapic_sync_to_vapic(vcpu);
> >>-   }
> >>-
> >>srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
> >>
> >>kvm_guest_enter();
> >This breaks
> >
> >int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
> >{
> > struct kvm_lapic *apic = vcpu->arch.apic;
> > int highest_irr;
> >
> > /* This may race with setting of irr in __apic_accept_irq() and
> >  * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
> >  * will cause vmexit immediately and the value will be recalculated
> >  * on the next vmentry.
> >  */
> >
> >(also valid for nmi_pending and PIC). Can't simply move
> >atomic_set(guest_mode, 1) in preemptible section as that would make it
> >possible for kvm_vcpu_kick to IPI stale vcpu->cpu.
> 
> Right.  Can fix by adding a kvm_make_request() to force the retry loop.
> 
> >Also should undo vmx.rmode.* ?
> 
> Elaborate?

Undo vmx.rmode assignments on cancel_injection.



Re: [PATCH v2 3/3] KVM: Non-atomic interrupt injection

2010-07-20 Thread Avi Kivity

On 07/21/2010 03:55 AM, Marcelo Tosatti wrote:



--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4709,6 +4709,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (unlikely(r))
goto out;

+   inject_pending_event(vcpu);
+
+   /* enable NMI/IRQ window open exits if needed */
+   if (vcpu->arch.nmi_pending)
+   kvm_x86_ops->enable_nmi_window(vcpu);
+   else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+   kvm_x86_ops->enable_irq_window(vcpu);
+
+   if (kvm_lapic_enabled(vcpu)) {
+   update_cr8_intercept(vcpu);
+   kvm_lapic_sync_to_vapic(vcpu);
+   }
+
preempt_disable();

kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -4727,23 +4740,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
smp_wmb();
local_irq_enable();
preempt_enable();
+   kvm_x86_ops->cancel_injection(vcpu);
r = 1;
goto out;
}

-   inject_pending_event(vcpu);
-
-   /* enable NMI/IRQ window open exits if needed */
-   if (vcpu->arch.nmi_pending)
-   kvm_x86_ops->enable_nmi_window(vcpu);
-   else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-   kvm_x86_ops->enable_irq_window(vcpu);
-
-   if (kvm_lapic_enabled(vcpu)) {
-   update_cr8_intercept(vcpu);
-   kvm_lapic_sync_to_vapic(vcpu);
-   }
-
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

kvm_guest_enter();
 

> This breaks
>
> int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
> {
> 	struct kvm_lapic *apic = vcpu->arch.apic;
> 	int highest_irr;
>
> 	/* This may race with setting of irr in __apic_accept_irq() and
> 	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
> 	 * will cause vmexit immediately and the value will be recalculated
> 	 * on the next vmentry.
> 	 */
>
> (also valid for nmi_pending and PIC). Can't simply move
> atomic_set(guest_mode, 1) in preemptible section as that would make it
> possible for kvm_vcpu_kick to IPI stale vcpu->cpu.


Right.  Can fix by adding a kvm_make_request() to force the retry loop.
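
(A rough sketch of that idea, not the merged change; KVM_REQ_EVENT is an
assumed name for the new request bit, and the surrounding details are
paraphrased:)

	/* Wherever an interrupt/NMI is queued for the vcpu (e.g. in
	 * __apic_accept_irq()), raise a request before kicking:
	 */
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	kvm_vcpu_kick(vcpu);

	/* Then, assuming the bail-out check taken after guest_mode is set
	 * (the one that now calls ->cancel_injection()) also fires when
	 * vcpu->requests is non-zero, the entry is abandoned, the injection
	 * is cancelled, and the loop re-runs inject_pending_event() with
	 * the newly raised interrupt visible.
	 */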


> Also should undo vmx.rmode.* ?


Elaborate?

--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.



Re: [PATCH v2 3/3] KVM: Non-atomic interrupt injection

2010-07-20 Thread Marcelo Tosatti
On Tue, Jul 20, 2010 at 04:17:07PM +0300, Avi Kivity wrote:
> Change the interrupt injection code to work from preemptible, interrupts
> enabled context.  This works by adding a ->cancel_injection() operation
> that undoes an injection in case we were not able to actually enter the guest
> (this condition could never happen with atomic injection).
> 
> Signed-off-by: Avi Kivity 
> ---
>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/svm.c              |   12 ++++++++++++
>  arch/x86/kvm/vmx.c              |   11 +++++++++++
>  arch/x86/kvm/x86.c              |   27 ++++++++++++++-------------
>  4 files changed, 38 insertions(+), 13 deletions(-)
> 

> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4709,6 +4709,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>   if (unlikely(r))
>   goto out;
>  
> + inject_pending_event(vcpu);
> +
> + /* enable NMI/IRQ window open exits if needed */
> + if (vcpu->arch.nmi_pending)
> + kvm_x86_ops->enable_nmi_window(vcpu);
> + else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
> + kvm_x86_ops->enable_irq_window(vcpu);
> +
> + if (kvm_lapic_enabled(vcpu)) {
> + update_cr8_intercept(vcpu);
> + kvm_lapic_sync_to_vapic(vcpu);
> + }
> +
>   preempt_disable();
>  
>   kvm_x86_ops->prepare_guest_switch(vcpu);
> @@ -4727,23 +4740,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>   smp_wmb();
>   local_irq_enable();
>   preempt_enable();
> + kvm_x86_ops->cancel_injection(vcpu);
>   r = 1;
>   goto out;
>   }
>  
> - inject_pending_event(vcpu);
> -
> - /* enable NMI/IRQ window open exits if needed */
> - if (vcpu->arch.nmi_pending)
> - kvm_x86_ops->enable_nmi_window(vcpu);
> - else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
> - kvm_x86_ops->enable_irq_window(vcpu);
> -
> - if (kvm_lapic_enabled(vcpu)) {
> - update_cr8_intercept(vcpu);
> - kvm_lapic_sync_to_vapic(vcpu);
> - }
> -
>   srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
>  
>   kvm_guest_enter();

This breaks

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int highest_irr;

	/* This may race with setting of irr in __apic_accept_irq() and
	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
	 * will cause vmexit immediately and the value will be recalculated
	 * on the next vmentry.
	 */

(also valid for nmi_pending and PIC). Can't simply move
atomic_set(guest_mode, 1) in preemptible section as that would make it
possible for kvm_vcpu_kick to IPI stale vcpu->cpu.
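
(For reference, roughly the ordering kvm_vcpu_kick() relies on, paraphrased
from memory rather than quoted: the IPI is only sent while guest_mode is set,
and vcpu->cpu is only stable while the vcpu thread runs with preemption
disabled.)

	/* Simplified kvm_vcpu_kick() core, illustration only; the real
	 * function also wakes the vcpu's waitqueue.
	 */
	void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
	{
		int me = get_cpu();
		int cpu = vcpu->cpu;

		if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
			if (atomic_xchg(&vcpu->guest_mode, 0))
				smp_send_reschedule(cpu);	/* forces a vmexit */
		put_cpu();
	}

So an interrupt raised after inject_pending_event() but before guest_mode is
set would neither be injected nor trigger the IPI above, while setting
guest_mode from preemptible context could leave the kick targeting a stale
vcpu->cpu.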

Also should undo vmx.rmode.* ?




[PATCH v2 3/3] KVM: Non-atomic interrupt injection

2010-07-20 Thread Avi Kivity
Change the interrupt injection code to work from preemptible, interrupts
enabled context.  This works by adding a ->cancel_injection() operation
that undoes an injection in case we were not able to actually enter the guest
(this condition could never happen with atomic injection).

Signed-off-by: Avi Kivity 
---
 arch/x86/include/asm/kvm_host.h |    1 +
 arch/x86/kvm/svm.c              |   12 ++++++++++++
 arch/x86/kvm/vmx.c              |   11 +++++++++++
 arch/x86/kvm/x86.c              |   27 ++++++++++++++-------------
 4 files changed, 38 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 502e53f..5dd797c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -505,6 +505,7 @@ struct kvm_x86_ops {
void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject);
+   void (*cancel_injection)(struct kvm_vcpu *vcpu);
int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
int (*nmi_allowed)(struct kvm_vcpu *vcpu);
bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 56c9b6b..46d068e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3135,6 +3135,17 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
}
 }
 
+static void svm_cancel_injection(struct kvm_vcpu *vcpu)
+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+   struct vmcb_control_area *control = &svm->vmcb->control;
+
+   control->exit_int_info = control->event_inj;
+   control->exit_int_info_err = control->event_inj_err;
+   control->event_inj = 0;
+   svm_complete_interrupts(svm);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #else
@@ -3493,6 +3504,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_irq = svm_set_irq,
.set_nmi = svm_inject_nmi,
.queue_exception = svm_queue_exception,
+   .cancel_injection = svm_cancel_injection,
.interrupt_allowed = svm_interrupt_allowed,
.nmi_allowed = svm_nmi_allowed,
.get_nmi_mask = svm_get_nmi_mask,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 53b6fc0..72381b7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3906,6 +3906,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
  IDT_VECTORING_ERROR_CODE);
 }
 
+static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
+{
+   __vmx_complete_interrupts(to_vmx(vcpu),
+ vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
+ VM_ENTRY_INSTRUCTION_LEN,
+ VM_ENTRY_EXCEPTION_ERROR_CODE);
+
+   vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
+}
+
 /*
  * Failure to inject an interrupt should give us the information
  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
@@ -4360,6 +4370,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_irq = vmx_inject_irq,
.set_nmi = vmx_inject_nmi,
.queue_exception = vmx_queue_exception,
+   .cancel_injection = vmx_cancel_injection,
.interrupt_allowed = vmx_interrupt_allowed,
.nmi_allowed = vmx_nmi_allowed,
.get_nmi_mask = vmx_get_nmi_mask,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 84bfb51..1040d3f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4709,6 +4709,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (unlikely(r))
goto out;
 
+   inject_pending_event(vcpu);
+
+   /* enable NMI/IRQ window open exits if needed */
+   if (vcpu->arch.nmi_pending)
+   kvm_x86_ops->enable_nmi_window(vcpu);
+   else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+   kvm_x86_ops->enable_irq_window(vcpu);
+
+   if (kvm_lapic_enabled(vcpu)) {
+   update_cr8_intercept(vcpu);
+   kvm_lapic_sync_to_vapic(vcpu);
+   }
+
preempt_disable();
 
kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -4727,23 +4740,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
smp_wmb();
local_irq_enable();
preempt_enable();
+   kvm_x86_ops->cancel_injection(vcpu);
r = 1;
goto out;
}
 
-   inject_pending_event(vcpu);
-
-   /* enable NMI/IRQ window open exits if needed */
-   if (vcpu->arch.nmi_pending)
-   kvm_x86_ops->enable_nmi_window(vcpu);
-   else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-   kvm_x86_ops->enable_irq_window(vcpu);
-
-   if (kvm_lapic_enabled(vcpu)) {
-   update_cr8_intercept(vcpu);
-   kvm_lapic_sync_to_vapic(vcpu);
-   }
-
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
kvm_guest_enter();
-- 
1.7.1
