From: Wanpeng Li <wanpeng...@hotmail.com>

------------[ cut here ]------------
WARNING: CPU: 7 PID: 3861 at /home/kernel/ssd/kvm/arch/x86/kvm//vmx.c:11299 nested_vmx_vmexit+0x176e/0x1980 [kvm_intel]
CPU: 7 PID: 3861 Comm: qemu-system-x86 Tainted: G        W  OE   4.13.0-rc4+ #11
RIP: 0010:nested_vmx_vmexit+0x176e/0x1980 [kvm_intel]
Call Trace:
 ? kvm_multiple_exception+0x149/0x170 [kvm]
 ? handle_emulation_failure+0x79/0x230 [kvm]
 ? load_vmcs12_host_state+0xa80/0xa80 [kvm_intel]
 ? check_chain_key+0x137/0x1e0
 ? reexecute_instruction.part.168+0x130/0x130 [kvm]
 nested_vmx_inject_exception_vmexit+0xb7/0x100 [kvm_intel]
 ? nested_vmx_inject_exception_vmexit+0xb7/0x100 [kvm_intel]
 vmx_queue_exception+0x197/0x300 [kvm_intel]
 kvm_arch_vcpu_ioctl_run+0x1b0c/0x2c90 [kvm]
 ? kvm_arch_vcpu_runnable+0x220/0x220 [kvm]
 ? preempt_count_sub+0x18/0xc0
 ? restart_apic_timer+0x17d/0x300 [kvm]
 ? kvm_lapic_restart_hv_timer+0x37/0x50 [kvm]
 ? kvm_arch_vcpu_load+0x1d8/0x350 [kvm]
 kvm_vcpu_ioctl+0x4e4/0x910 [kvm]
 ? kvm_vcpu_ioctl+0x4e4/0x910 [kvm]
 ? kvm_dev_ioctl+0xbe0/0xbe0 [kvm]

The flag "nested_run_pending", which can override the decision of which should 
run 
next, L1 or L2. nested_run_pending=1 means that we *must* run L2 next, not L1. 
This 
is necessary in particular when L1 did a VMLAUNCH of L2 and therefore expects 
L2 to 
be run (and perhaps be injected with an event it specified, etc.). 
Nested_run_pending 
is especially intended to avoid switching  to L1 in the injection 
decision-point.
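
For reference, the flag's lifecycle in the 4.13-era vmx.c looks roughly
like this (paraphrased from memory, not verbatim):

        /* nested_vmx_run(): L1 executed VMLAUNCH/VMRESUME, so L2 is
         * owed an entry before we may switch back to L1. */
        vmx->nested.nested_run_pending = 1;

        /* vmx_vcpu_run(): once the hardware VM entry has actually
         * happened, the obligation is met and the flag is cleared. */
        vmx->nested.nested_run_pending = 0;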

I caught this in the exception queueing path. This patch fixes it by
requesting an immediate VM exit from L2 and keeping the exception for
L1 pending for a subsequent nested VM exit: the nested-exception check
is moved from vmx_queue_exception() to vmx_check_nested_events(), which
returns -EBUSY as long as nested_run_pending is set.
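
For illustration only, here is a small standalone userspace model of
that decision flow (compiles with gcc; every type and helper below is a
stand-in invented for this sketch, not the real KVM code; only the
control flow mirrors the patch). The caller treats a non-zero return
from the check as "enter the guest now, retry injection later", which
is what turns -EBUSY into an immediate VM exit from L2:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* Stand-ins for the relevant vcpu/vmx state. */
struct vcpu_model {
        bool exception_pending;   /* vcpu->arch.exception.pending */
        bool l1_intercepts;       /* nested_vmx_check_exception() result */
        bool nested_run_pending;  /* vmx->nested.nested_run_pending */
};

/* Mirrors the patched vmx_check_nested_events(): if L1 intercepts the
 * pending exception but we still owe L2 an entry, defer with -EBUSY
 * instead of switching to L1 right away. */
static int check_nested_events(struct vcpu_model *v)
{
        if (v->exception_pending && v->l1_intercepts) {
                if (v->nested_run_pending)
                        return -EBUSY;
                printf("exception -> nested vmexit to L1\n");
                v->exception_pending = false;
        }
        return 0;
}

int main(void)
{
        struct vcpu_model v = {
                .exception_pending = true,
                .l1_intercepts = true,
                .nested_run_pending = true,
        };

        /* First pass: L1 just did VMLAUNCH, so the vmexit is deferred. */
        if (check_nested_events(&v) == -EBUSY)
                printf("deferred: run L2 once, keep exception pending\n");

        /* After the first entry to L2, nested_run_pending is cleared
         * and the pending exception can finally be delivered to L1. */
        v.nested_run_pending = false;
        check_nested_events(&v);
        return 0;
}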

Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Radim Krčmář <rkrc...@redhat.com>
Signed-off-by: Wanpeng Li <wanpeng...@hotmail.com>
---
 arch/x86/kvm/vmx.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 21760b8..6f88a79 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2501,16 +2501,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned nr = vcpu->arch.exception.nr;
        bool has_error_code = vcpu->arch.exception.has_error_code;
-       bool reinject = vcpu->arch.exception.injected;
        u32 error_code = vcpu->arch.exception.error_code;
        u32 intr_info = nr | INTR_INFO_VALID_MASK;
-       unsigned long exit_qual;
-
-       if (!reinject && is_guest_mode(vcpu) &&
-           nested_vmx_check_exception(vcpu, &exit_qual)) {
-               nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
-               return;
-       }
 
        if (has_error_code) {
                vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
@@ -10988,10 +10980,20 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       unsigned long exit_qual;
 
        if (kvm_event_needs_reinjection(vcpu))
                return -EBUSY;
 
+       if (vcpu->arch.exception.pending &&
+               nested_vmx_check_exception(vcpu, &exit_qual)) {
+               if (vmx->nested.nested_run_pending)
+                       return -EBUSY;
+
+               nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
+               return 0;
+       }
+
        if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
            vmx->nested.preemption_timer_expired) {
                if (vmx->nested.nested_run_pending)
-- 
2.7.4
