[COMMIT master] KVM: Non-atomic interrupt injection

2010-09-16 Thread Avi Kivity
From: Avi Kivity 

Change the interrupt injection code to work from preemptible, interrupts-enabled
context.  This works by adding a ->cancel_injection() operation
that undoes an injection in case we were not able to actually enter the guest
(this condition could never arise with atomic injection).

Signed-off-by: Avi Kivity 
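
A condensed sketch of the reworked entry path (hypothetical helper name;
locking, window enabling and the remaining request handling omitted; the
real code is in the x86.c hunk below):

/* Injection now runs before preempt_disable(); an aborted entry is
 * rolled back through the new ->cancel_injection() hook. */
static int enter_guest_sketch(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_EVENT, vcpu))
		inject_pending_event(vcpu);	/* preemptible, IRQs enabled */

	preempt_disable();
	local_irq_disable();

	if (vcpu->requests || need_resched() || signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		/* entry aborted: push the programmed event back into the
		 * generic kvm queues so it is re-injected next time */
		kvm_x86_ops->cancel_injection(vcpu);
		return 1;
	}

	/* ... actual guest entry ... */
	return 0;
}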

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3a00741..02f9780 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -552,6 +552,7 @@ struct kvm_x86_ops {
void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject);
+   void (*cancel_injection)(struct kvm_vcpu *vcpu);
int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
int (*nmi_allowed)(struct kvm_vcpu *vcpu);
bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index bc317eb..43f5558 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3261,6 +3261,17 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
}
 }
 
+static void svm_cancel_injection(struct kvm_vcpu *vcpu)
+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+   struct vmcb_control_area *control = &svm->vmcb->control;
+
+   control->exit_int_info = control->event_inj;
+   control->exit_int_info_err = control->event_inj_err;
+   control->event_inj = 0;
+   svm_complete_interrupts(svm);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #else
@@ -3626,6 +3637,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_irq = svm_set_irq,
.set_nmi = svm_inject_nmi,
.queue_exception = svm_queue_exception,
+   .cancel_injection = svm_cancel_injection,
.interrupt_allowed = svm_interrupt_allowed,
.nmi_allowed = svm_nmi_allowed,
.get_nmi_mask = svm_get_nmi_mask,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 108dbaf..199fa7e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3901,6 +3901,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
  IDT_VECTORING_ERROR_CODE);
 }
 
+static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
+{
+   __vmx_complete_interrupts(to_vmx(vcpu),
+ vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
+ VM_ENTRY_INSTRUCTION_LEN,
+ VM_ENTRY_EXCEPTION_ERROR_CODE);
+
+   vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
+}
+
 /*
  * Failure to inject an interrupt should give us the information
  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
@@ -4354,6 +4364,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_irq = vmx_inject_irq,
.set_nmi = vmx_inject_nmi,
.queue_exception = vmx_queue_exception,
+   .cancel_injection = vmx_cancel_injection,
.interrupt_allowed = vmx_interrupt_allowed,
.nmi_allowed = vmx_nmi_allowed,
.get_nmi_mask = vmx_get_nmi_mask,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e719803..a465bd2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5005,7 +5005,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
int r;
bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
vcpu->run->request_interrupt_window;
-   bool req_event;
 
if (vcpu->requests) {
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5041,6 +5040,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (unlikely(r))
goto out;
 
+   if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+   inject_pending_event(vcpu);
+
+   /* enable NMI/IRQ window open exits if needed */
+   if (vcpu->arch.nmi_pending)
+   kvm_x86_ops->enable_nmi_window(vcpu);
+   else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+   kvm_x86_ops->enable_irq_window(vcpu);
+
+   if (kvm_lapic_enabled(vcpu)) {
+   update_cr8_intercept(vcpu);
+   kvm_lapic_sync_to_vapic(vcpu);
+   }
+   }
+
preempt_disable();
 
kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -5053,35 +5067,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
local_irq_disable();
 
-   req_event = kvm_check_request(KVM_REQ_EVENT, vcpu);
-
if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
|| need_resched() || signal_pending(current)) {
-   if (req_event)
-   kvm_make_request(KVM_REQ_EVENT, vcpu);
atomic_set(&vcpu->guest_mode, 0);
smp_wmb();
local_irq_enable();
preempt_enable();
+   kvm_x86_ops->cancel_injection(vcpu);
r = 1;
goto out;

[COMMIT master] KVM: VMX: Move fixup_rmode_irq() to avoid forward declaration

2010-09-16 Thread Avi Kivity
From: Avi Kivity 

No code changes.

Signed-off-by: Avi Kivity 

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 199fa7e..8ef6199 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -182,7 +182,6 @@ static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
-static void fixup_rmode_irq(struct vcpu_vmx *vmx, u32 *idt_vectoring_info);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -3833,6 +3832,29 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
 }
 
+/*
+ * Failure to inject an interrupt should give us the information
+ * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
+ * when fetching the interrupt redirection bitmap in the real-mode
+ * tss, this doesn't happen.  So we do it ourselves.
+ */
+static void fixup_rmode_irq(struct vcpu_vmx *vmx, u32 *idt_vectoring_info)
+{
+   vmx->rmode.irq.pending = 0;
+   if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
+   return;
+   kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
+   if (*idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
+   *idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
+   *idt_vectoring_info |= INTR_TYPE_EXT_INTR;
+   return;
+   }
+   *idt_vectoring_info =
+   VECTORING_INFO_VALID_MASK
+   | INTR_TYPE_EXT_INTR
+   | vmx->rmode.irq.vector;
+}
+
 static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
  u32 idt_vectoring_info,
  int instr_len_field,
@@ -3911,29 +3933,6 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
 }
 
-/*
- * Failure to inject an interrupt should give us the information
- * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
- * when fetching the interrupt redirection bitmap in the real-mode
- * tss, this doesn't happen.  So we do it ourselves.
- */
-static void fixup_rmode_irq(struct vcpu_vmx *vmx, u32 *idt_vectoring_info)
-{
-   vmx->rmode.irq.pending = 0;
-   if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
-   return;
-   kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
-   if (*idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
-   *idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
-   *idt_vectoring_info |= INTR_TYPE_EXT_INTR;
-   return;
-   }
-   *idt_vectoring_info =
-   VECTORING_INFO_VALID_MASK
-   | INTR_TYPE_EXT_INTR
-   | vmx->rmode.irq.vector;
-}
-
 #ifdef CONFIG_X86_64
 #define R "r"
 #define Q "q"


[COMMIT master] KVM: MMU: Don't track nested fault info in error-code

2010-09-16 Thread Avi Kivity
From: Joerg Roedel 

This patch moves the detection of whether a page fault was
nested out of the error code and into a separate
variable in the fault struct.

Signed-off-by: Joerg Roedel 
Signed-off-by: Avi Kivity 
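
The net effect, condensed from the hunks below: the nested-walk flag no
longer rides in the architectural error code (bit 31, PFERR_NESTED_MASK)
but in a field of its own.

	/* set where the nested gpa translation fails ... */
	if (t_gpa == UNMAPPED_GVA)
		vcpu->arch.fault.nested = true;

	/* ... consumed, then cleared, when the fault is propagated */
	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu);
	vcpu->arch.fault.nested = false;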

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 02f9780..8c5779d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -322,6 +322,7 @@ struct kvm_vcpu_arch {
struct {
u64  address;
unsigned error_code;
+   bool nested;
} fault;
 
/* only needed in kvm_pv_mmu_op() path, but it's hot so
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 513abbb..7086ca8 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -47,7 +47,6 @@
 #define PFERR_USER_MASK (1U << 2)
 #define PFERR_RSVD_MASK (1U << 3)
 #define PFERR_FETCH_MASK (1U << 4)
-#define PFERR_NESTED_MASK (1U << 31)
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a465bd2..a51635e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -342,18 +342,12 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
 
 void kvm_propagate_fault(struct kvm_vcpu *vcpu)
 {
-   u32 nested, error;
-
-   error   = vcpu->arch.fault.error_code;
-   nested  = error &  PFERR_NESTED_MASK;
-   error   = error & ~PFERR_NESTED_MASK;
-
-   vcpu->arch.fault.error_code = error;
-
-   if (mmu_is_nested(vcpu) && !nested)
+   if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
vcpu->arch.nested_mmu.inject_page_fault(vcpu);
else
vcpu->arch.mmu.inject_page_fault(vcpu);
+
+   vcpu->arch.fault.nested = false;
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -3524,7 +3518,7 @@ static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
access |= PFERR_USER_MASK;
t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
if (t_gpa == UNMAPPED_GVA)
-   vcpu->arch.fault.error_code |= PFERR_NESTED_MASK;
+   vcpu->arch.fault.nested = true;
 
return t_gpa;
 }


[COMMIT master] KVM: VMX: Parameterize vmx_complete_interrupts() for both exit and entry

2010-09-16 Thread Avi Kivity
From: Avi Kivity 

Currently vmx_complete_interrupts() decodes event information from the vmx
exit fields into the generic kvm event queues.  Make it able to decode
the information from the entry fields as well by parameterizing it.

Signed-off-by: Avi Kivity 
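
Both decode directions, side by side (condensed from this patch and from
its user, vmx_cancel_injection(), elsewhere in this series):

	/* normal exit processing: decode what the CPU recorded on exit */
	__vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
				  VM_EXIT_INSTRUCTION_LEN,
				  IDT_VECTORING_ERROR_CODE);

	/* cancellation: decode what we had programmed into the entry
	 * fields, turning a pending injection back into queued events */
	__vmx_complete_interrupts(vmx, vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
				  VM_ENTRY_INSTRUCTION_LEN,
				  VM_ENTRY_EXCEPTION_ERROR_CODE);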

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c10d700..108dbaf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -182,7 +182,7 @@ static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
-static void fixup_rmode_irq(struct vcpu_vmx *vmx);
+static void fixup_rmode_irq(struct vcpu_vmx *vmx, u32 *idt_vectoring_info);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -3833,17 +3833,18 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
 }
 
-static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
+static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
+ u32 idt_vectoring_info,
+ int instr_len_field,
+ int error_code_field)
 {
-   u32 idt_vectoring_info;
u8 vector;
int type;
bool idtv_info_valid;
 
if (vmx->rmode.irq.pending)
-   fixup_rmode_irq(vmx);
+   fixup_rmode_irq(vmx, &idt_vectoring_info);
 
-   idt_vectoring_info = vmx->idt_vectoring_info;
idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
vmx->vcpu.arch.nmi_injected = false;
@@ -3871,18 +3872,18 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
break;
case INTR_TYPE_SOFT_EXCEPTION:
vmx->vcpu.arch.event_exit_inst_len =
-   vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+   vmcs_read32(instr_len_field);
/* fall through */
case INTR_TYPE_HARD_EXCEPTION:
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
-   u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE);
+   u32 err = vmcs_read32(error_code_field);
kvm_queue_exception_e(&vmx->vcpu, vector, err);
} else
kvm_queue_exception(&vmx->vcpu, vector);
break;
case INTR_TYPE_SOFT_INTR:
vmx->vcpu.arch.event_exit_inst_len =
-   vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+   vmcs_read32(instr_len_field);
/* fall through */
case INTR_TYPE_EXT_INTR:
kvm_queue_interrupt(&vmx->vcpu, vector,
@@ -3893,24 +3894,31 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
}
 }
 
+static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
+{
+   __vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
+ VM_EXIT_INSTRUCTION_LEN,
+ IDT_VECTORING_ERROR_CODE);
+}
+
 /*
  * Failure to inject an interrupt should give us the information
  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
  * when fetching the interrupt redirection bitmap in the real-mode
  * tss, this doesn't happen.  So we do it ourselves.
  */
-static void fixup_rmode_irq(struct vcpu_vmx *vmx)
+static void fixup_rmode_irq(struct vcpu_vmx *vmx, u32 *idt_vectoring_info)
 {
vmx->rmode.irq.pending = 0;
if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
return;
kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
-   if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
-   vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
-   vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
+   if (*idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
+   *idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
+   *idt_vectoring_info |= INTR_TYPE_EXT_INTR;
return;
}
-   vmx->idt_vectoring_info =
+   *idt_vectoring_info =
VECTORING_INFO_VALID_MASK
| INTR_TYPE_EXT_INTR
| vmx->rmode.irq.vector;


[COMMIT master] KVM: Check for pending events before attempting injection

2010-09-16 Thread Avi Kivity
From: Avi Kivity 

Instead of blindly attempting to inject an event before each guest entry,
check for a possible event first in vcpu->requests.  Sites that can trigger
event injection are modified to set KVM_REQ_EVENT:

- interrupt, nmi window opening
- ppr updates
- i8259 output changes
- local apic irr changes
- rflags updates
- gif flag set
- event set on exit

This improves non-injecting entry performance, and sets the stage for
non-atomic injection.

Signed-off-by: Avi Kivity 
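
The pattern, in sketch form (producer sites appear throughout the hunks
below; the consumer side is the vcpu_enter_guest() change in this series):

	/* producer: a site that may have made an event deliverable
	 * marks the vcpu and kicks it out of guest mode */
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	kvm_vcpu_kick(vcpu);

	/* consumer: injection is attempted only when flagged, not
	 * unconditionally on every entry */
	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win)
		inject_pending_event(vcpu);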

diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 6e77471..ab1bb8f 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -67,6 +67,7 @@ static void pic_unlock(struct kvm_pic *s)
if (!found)
return;
 
+   kvm_make_request(KVM_REQ_EVENT, found);
kvm_vcpu_kick(found);
}
 }
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 77d8c0f..c6f2f15 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -259,9 +259,10 @@ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 
 static void apic_update_ppr(struct kvm_lapic *apic)
 {
-   u32 tpr, isrv, ppr;
+   u32 tpr, isrv, ppr, old_ppr;
int isr;
 
+   old_ppr = apic_get_reg(apic, APIC_PROCPRI);
tpr = apic_get_reg(apic, APIC_TASKPRI);
isr = apic_find_highest_isr(apic);
isrv = (isr != -1) ? isr : 0;
@@ -274,7 +275,10 @@ static void apic_update_ppr(struct kvm_lapic *apic)
apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
   apic, ppr, isr, isrv);
 
-   apic_set_reg(apic, APIC_PROCPRI, ppr);
+   if (old_ppr != ppr) {
+   apic_set_reg(apic, APIC_PROCPRI, ppr);
+   kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+   }
 }
 
 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
@@ -391,6 +395,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
break;
}
 
+   kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
break;
 
@@ -416,6 +421,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
   "INIT on a runnable vcpu %d\n",
   vcpu->vcpu_id);
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+   kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
} else {
apic_debug("Ignoring de-assert INIT to vcpu %d\n",
@@ -430,6 +436,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
result = 1;
vcpu->arch.sipi_vector = vector;
vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
+   kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
}
break;
@@ -475,6 +482,7 @@ static void apic_set_eoi(struct kvm_lapic *apic)
trigger_mode = IOAPIC_EDGE_TRIG;
if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+   kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 }
 
 static void apic_send_ipi(struct kvm_lapic *apic)
@@ -1152,6 +1160,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
update_divide_count(apic);
start_apic_timer(apic);
apic->irr_pending = true;
+   kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index eeb08d6..bc317eb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2371,6 +2371,7 @@ static int stgi_interception(struct vcpu_svm *svm)
 
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
+   kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 
enable_gif(svm);
 
@@ -2763,6 +2764,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 {
struct kvm_run *kvm_run = svm->vcpu.run;
 
+   kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
svm_clear_vintr(svm);
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
/*
@@ -3209,8 +3211,10 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 
svm->int3_injected = 0;
 
-   if (svm->vcpu.arch.hflags & HF_IRET_MASK)
+   if (svm->vcpu.arch.hflags & HF_IRET_MASK) {
svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
+   kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+   }
 
svm->vcpu.arch.nmi_injected = false;
kvm_clear_exception_queue(&svm->vcpu);
@@ -3219,6 +3223,8 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
if (!(exitintinfo & SVM_EXITINTINFO_VALID))
return;
 
+   kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

[COMMIT master] Rename KVM_UPSTREAM to OBSOLETE_KVM_IMPL

2010-09-16 Thread Avi Kivity
From: Avi Kivity 

The symbol KVM_UPSTREAM is used to mark sections of code that are part of
the upstream kvm implementation but are not used in qemu-kvm.  However, the
name becomes ambiguous if qemu-kvm is merged upstream.

Rename the symbol to avoid confusion.

Signed-off-by: Avi Kivity 

diff --git a/cpus.c b/cpus.c
index c545a62..99c04d1 100644
--- a/cpus.c
+++ b/cpus.c
@@ -299,7 +299,7 @@ void qemu_notify_event(void)
 }
 }
 
-#if defined(KVM_UPSTREAM) || !defined(CONFIG_KVM)
+#if defined(OBSOLETE_KVM_IMPL) || !defined(CONFIG_KVM)
 void qemu_mutex_lock_iothread(void) {}
 void qemu_mutex_unlock_iothread(void) {}
 #endif
diff --git a/kvm-all.c b/kvm-all.c
index 4ff75c4..d4b0861 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -41,7 +41,7 @@
 do { } while (0)
 #endif
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 
 typedef struct KVMSlot
 {
@@ -156,7 +156,7 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
 return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 }
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 static void kvm_reset_vcpu(void *opaque)
 {
 CPUState *env = opaque;
@@ -176,7 +176,7 @@ int kvm_pit_in_kernel(void)
 }
 
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 int kvm_init_vcpu(CPUState *env)
 {
 KVMState *s = kvm_state;
@@ -594,7 +594,7 @@ void kvm_cpu_register_phys_memory_client(void)
 cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);
 }
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 
 int kvm_init(int smp_cpus)
 {
@@ -816,7 +816,7 @@ void kvm_flush_coalesced_mmio_buffer(void)
 #endif
 }
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 
 static void do_kvm_cpu_synchronize_state(void *_env)
 {
@@ -1038,7 +1038,7 @@ int kvm_has_debugregs(void)
 return kvm_state->debugregs;
 }
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 int kvm_has_xsave(void)
 {
 return kvm_state->xsave;
@@ -1069,10 +1069,10 @@ void kvm_setup_guest_memory(void *start, size_t size)
 }
 
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-#ifndef KVM_UPSTREAM
+#ifndef OBSOLETE_KVM_IMPL
 #define run_on_cpu on_vcpu
 static void on_vcpu(CPUState *env, void (*func)(void *data), void *data);
-#endif /* !KVM_UPSTREAM */
+#endif /* !OBSOLETE_KVM_IMPL */
 
 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
  target_ulong pc)
diff --git a/kvm.h b/kvm.h
index d321fce..56236ae 100644
--- a/kvm.h
+++ b/kvm.h
@@ -31,13 +31,13 @@ extern int kvm_allowed;
 #define kvm_enabled() (0)
 #endif
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 struct kvm_run;
 
 /* external API */
 
 int kvm_init(int smp_cpus);
-#endif /* KVM_UPSTREAM */
+#endif /* OBSOLETE_KVM_IMPL */
 
 int kvm_has_sync_mmu(void);
 int kvm_has_vcpu_events(void);
@@ -96,7 +96,7 @@ int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run);
 
 int kvm_arch_pre_run(CPUState *env, struct kvm_run *run);
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 int kvm_arch_process_irqchip_events(CPUState *env);
 #endif
 
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index b00e80d..f4fc063 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -188,7 +188,7 @@ int kvm_arch_init_vcpu(CPUState *env)
 return r;
 }
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 
 env->mp_state = KVM_MP_STATE_RUNNABLE;
 
@@ -304,7 +304,7 @@ void kvm_arch_reset_vcpu(CPUState *env)
 env->mp_state = KVM_MP_STATE_RUNNABLE;
 }
 }
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 
 static int kvm_has_msr_star(CPUState *env)
 {
@@ -644,7 +644,7 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
 entry->data = value;
 }
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 static int kvm_put_msrs(CPUState *env, int level)
 {
 struct {
@@ -1104,7 +1104,7 @@ static int kvm_get_debugregs(CPUState *env)
 return 0;
 }
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 int kvm_arch_put_registers(CPUState *env, int level)
 {
 int ret;
@@ -1242,7 +1242,7 @@ int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
 return 0;
 }
 
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 
 int kvm_arch_process_irqchip_events(CPUState *env)
 {
diff --git a/vl.c b/vl.c
index 22a3616..378a176 100644
--- a/vl.c
+++ b/vl.c
@@ -2466,7 +2466,7 @@ int main(int argc, char **argv, char **envp)
 case QEMU_OPTION_smbios:
 do_smbios_option(optarg);
 break;
-#ifdef KVM_UPSTREAM
+#ifdef OBSOLETE_KVM_IMPL
 case QEMU_OPTION_enable_kvm:
 kvm_allowed = 1;
 #endif
@@ -2803,7 +2803,7 @@ int main(int argc, char **argv, char **envp)
 if (kvm_allowed) {
 int ret = kvm_init(smp_cpus);
 if (ret < 0) {
-#if defined(KVM_UPSTREAM) || defined(CONFIG_NO_CPU_EMULATION)
+#if defined(OBSOLETE_KVM_IMPL) || defined(CONFIG_NO_CPU_EMULATION)
 if (!kvm_available()) {
 printf("KVM not supported for this target\n");
 } else {

[COMMIT master] KVM: VMX: Split up vmx_complete_interrupts()

2010-09-16 Thread Avi Kivity
From: Avi Kivity 

vmx_complete_interrupts() does too much; split it up:
 - vmx_vcpu_run() gets the "cache important vmcs fields" part
 - a new vmx_complete_atomic_exit() gets the parts that must be done atomically
 - a new vmx_recover_nmi_blocking() does what its name says
 - vmx_complete_interrupts() retains the event injection recovery code

This reduces the work done in atomic context.

Signed-off-by: Avi Kivity 
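
The resulting tail of vmx_vcpu_run(), condensed from the hunk below:

	vmx->exit_reason    = vmcs_read32(VM_EXIT_REASON);
	vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	vmx_complete_atomic_exit(vmx);	/* machine checks, NMI reflection */
	vmx_recover_nmi_blocking(vmx);	/* virtual-NMI blocking bookkeeping */
	vmx_complete_interrupts(vmx);	/* event injection recovery */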

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 291f99c..568f936 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -125,6 +125,7 @@ struct vcpu_vmx {
unsigned long host_rsp;
int   launched;
u8fail;
+   u32   exit_intr_info;
u32   idt_vectoring_info;
struct shared_msr_entry *guest_msrs;
int   nmsrs;
@@ -3781,18 +3782,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
vmcs_write32(TPR_THRESHOLD, irr);
 }
 
-static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
+static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
-   u32 exit_intr_info;
-   u32 idt_vectoring_info = vmx->idt_vectoring_info;
-   bool unblock_nmi;
-   u8 vector;
-   int type;
-   bool idtv_info_valid;
-
-   exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-
-   vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+   u32 exit_intr_info = vmx->exit_intr_info;
 
/* Handle machine checks before interrupts are enabled */
if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
@@ -3807,8 +3799,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
asm("int $2");
kvm_after_handle_nmi(&vmx->vcpu);
}
+}
 
-   idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
+static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
+{
+   u32 exit_intr_info = vmx->exit_intr_info;
+   bool unblock_nmi;
+   u8 vector;
+   bool idtv_info_valid;
+
+   idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
if (cpu_has_virtual_nmis()) {
unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
@@ -3830,6 +3830,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
} else if (unlikely(vmx->soft_vnmi_blocked))
vmx->vnmi_blocked_time +=
ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
+}
+
+static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
+{
+   u32 idt_vectoring_info = vmx->idt_vectoring_info;
+   u8 vector;
+   int type;
+   bool idtv_info_valid;
+
+   idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
vmx->vcpu.arch.nmi_injected = false;
kvm_clear_exception_queue(&vmx->vcpu);
@@ -4042,6 +4052,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
vmx->launched = 1;
 
+   vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+   vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+   vmx_complete_atomic_exit(vmx);
+   vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx);
 }
 


[COMMIT master] KVM: VMX: Move real-mode interrupt injection fixup to vmx_complete_interrupts()

2010-09-16 Thread Avi Kivity
From: Avi Kivity 

This allows reuse of vmx_complete_interrupts() for cancelling injections.

Signed-off-by: Avi Kivity 

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 568f936..c10d700 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -182,6 +182,7 @@ static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
+static void fixup_rmode_irq(struct vcpu_vmx *vmx);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -3834,11 +3835,15 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 
 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 {
-   u32 idt_vectoring_info = vmx->idt_vectoring_info;
+   u32 idt_vectoring_info;
u8 vector;
int type;
bool idtv_info_valid;
 
+   if (vmx->rmode.irq.pending)
+   fixup_rmode_irq(vmx);
+
+   idt_vectoring_info = vmx->idt_vectoring_info;
idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
vmx->vcpu.arch.nmi_injected = false;
@@ -4046,8 +4051,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.regs_dirty = 0;
 
vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
-   if (vmx->rmode.irq.pending)
-   fixup_rmode_irq(vmx);
 
asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
vmx->launched = 1;